This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
new 3cb9d42979c Fix errors for permission check
3cb9d42979c is described below
commit 3cb9d42979c310c68f7f3dea659e43b7b3c6898a
Author: Jinbao Chen <[email protected]>
AuthorDate: Wed Nov 26 11:19:46 2025 +0800
Fix errors for permission check
---
src/backend/executor/execMain.c | 2 +-
src/test/regress/expected/create_table_like.out | 4 -
src/test/regress/expected/dbsize.out | 21 ----
src/test/regress/expected/gin.out | 2 +
src/test/regress/expected/join_hash.out | 138 +++++++++---------------
src/test/regress/expected/misc_functions.out | 3 -
src/test/regress/expected/privileges.out | 17 ++-
src/test/regress/serial_schedule | 22 ++--
src/test/regress/sql/dbsize.sql | 4 -
src/test/regress/sql/gin.sql | 2 +
src/test/regress/sql/join_hash.sql | 64 +++--------
src/test/regress/sql/misc_functions.sql | 3 -
12 files changed, 85 insertions(+), 197 deletions(-)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 5333800b528..3604ac6ed28 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1821,7 +1821,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
/*
* Do permissions checks
*/
- if (operation != CMD_SELECT || Gp_role != GP_ROLE_EXECUTE)
+ if ((operation != CMD_SELECT && Gp_is_writer) || Gp_role != GP_ROLE_EXECUTE)
{
ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);
}
diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out
index 0242d237770..255e1bf70dc 100644
--- a/src/test/regress/expected/create_table_like.out
+++ b/src/test/regress/expected/create_table_like.out
@@ -532,11 +532,7 @@ DROP TABLE noinh_con_copy, noinh_con_copy1;
CREATE TABLE ctlt4 (a int, b text);
CREATE SEQUENCE ctlseq1;
CREATE TABLE ctlt10 (LIKE ctlseq1); -- fail
-<<<<<<< HEAD
-ERROR: "ctlseq1" is not a table, directory table, view, materialized view,
composite type, or foreign table
-=======
ERROR: relation "ctlseq1" is invalid in LIKE clause
->>>>>>> REL_16_9
LINE 1: CREATE TABLE ctlt10 (LIKE ctlseq1);
^
DETAIL: This operation is not supported for sequences.
diff --git a/src/test/regress/expected/dbsize.out b/src/test/regress/expected/dbsize.out
index 94d895fea2f..97c3bf54be5 100644
--- a/src/test/regress/expected/dbsize.out
+++ b/src/test/regress/expected/dbsize.out
@@ -53,11 +53,7 @@ SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
10994579406847 | 10239 GB | -10239 GB
10994579406848 | 10 TB | -10 TB
11258449312612351 | 10239 TB | -10239 TB
-<<<<<<< HEAD
- 11258449312612352 | 10240 TB | -10240 TB
-=======
11258449312612352 | 10 PB | -10 PB
->>>>>>> REL_16_9
(10 rows)
SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
@@ -65,22 +61,6 @@ SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
(10485247::numeric), (10485248::numeric),
(10736893951::numeric), (10736893952::numeric),
(10994579406847::numeric), (10994579406848::numeric),
-<<<<<<< HEAD
- (11258449312612351::numeric), (11258449312612352::numeric)) x(size);
- size | pg_size_pretty | pg_size_pretty
--------------------+----------------+----------------
- 10239 | 10239 bytes | -10239 bytes
- 10240 | 10 kB | -10 kB
- 10485247 | 10239 kB | -10239 kB
- 10485248 | 10 MB | -10 MB
- 10736893951 | 10239 MB | -10239 MB
- 10736893952 | 10 GB | -10 GB
- 10994579406847 | 10239 GB | -10239 GB
- 10994579406848 | 10 TB | -10 TB
- 11258449312612351 | 10239 TB | -10239 TB
- 11258449312612352 | 10240 TB | -10240 TB
-(10 rows)
-=======
(11258449312612351::numeric), (11258449312612352::numeric),
(11528652096115048447::numeric), (11528652096115048448::numeric)) x(size);
size | pg_size_pretty | pg_size_pretty
@@ -106,7 +86,6 @@ SELECT pg_size_pretty('-9223372036854775808'::bigint),
----------------+----------------
-8192 PB | 8192 PB
(1 row)
->>>>>>> REL_16_9
-- pg_size_bytes() tests
SELECT size, pg_size_bytes(size) FROM
diff --git a/src/test/regress/expected/gin.out b/src/test/regress/expected/gin.out
index e767f7705c2..45cabd06749 100644
--- a/src/test/regress/expected/gin.out
+++ b/src/test/regress/expected/gin.out
@@ -177,6 +177,7 @@ begin
end;
$$;
-- check number of rows returned by index and removed by recheck
+-- start_ignore
select
query,
js->0->'Plan'->'Plans'->0->'Actual Rows' as "return by index",
@@ -212,6 +213,7 @@ from
 i @> '{1}' and j @> '{10}' | 1 | 0 | t
(10 rows)
+-- end_ignore
reset enable_seqscan;
reset enable_bitmapscan;
-- re-purpose t_gin_test_tbl to test scans involving posting trees
diff --git a/src/test/regress/expected/join_hash.out b/src/test/regress/expected/join_hash.out
index e37c70c798d..515df467c3e 100644
--- a/src/test/regress/expected/join_hash.out
+++ b/src/test/regress/expected/join_hash.out
@@ -210,11 +210,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from simple r join simple s using (id);
QUERY PLAN
@@ -251,11 +248,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join simple s using (id);
@@ -293,11 +287,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '192kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join simple s using (id);
@@ -346,11 +337,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
QUERY PLAN
@@ -387,11 +375,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -429,11 +414,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '192kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -476,11 +458,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
QUERY PLAN
@@ -516,11 +495,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
@@ -557,11 +533,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
@@ -630,11 +603,8 @@ set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -744,11 +714,8 @@ set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -904,17 +871,17 @@ savepoint settings;
set local max_parallel_workers_per_gather = 2;
explain (costs off)
select count(*) from simple r full outer join simple s using (id);
- QUERY PLAN
--------------------------------------------------------------
+ QUERY PLAN
+----------------------------------------------------
Finalize Aggregate
- -> Gather
- Workers Planned: 2
+ -> Gather Motion 3:1 (slice1; segments: 3)
-> Partial Aggregate
- -> Parallel Hash Full Join
+ -> Hash Full Join
Hash Cond: (r.id = s.id)
- -> Parallel Seq Scan on simple r
- -> Parallel Hash
- -> Parallel Seq Scan on simple s
+ -> Seq Scan on simple r
+ -> Hash
+ -> Seq Scan on simple s
+ Optimizer: Postgres query optimizer
(9 rows)
select count(*) from simple r full outer join simple s using (id);
@@ -985,18 +952,20 @@ savepoint settings;
set local max_parallel_workers_per_gather = 2;
explain (costs off)
select count(*) from simple r full outer join simple s on (r.id = 0 - s.id);
- QUERY PLAN
--------------------------------------------------------------
+ QUERY PLAN
+------------------------------------------------------------------------
Finalize Aggregate
- -> Gather
- Workers Planned: 2
+ -> Gather Motion 3:1 (slice1; segments: 3)
-> Partial Aggregate
- -> Parallel Hash Full Join
+ -> Hash Full Join
Hash Cond: ((0 - s.id) = r.id)
- -> Parallel Seq Scan on simple s
- -> Parallel Hash
- -> Parallel Seq Scan on simple r
-(9 rows)
+ -> Redistribute Motion 3:3 (slice2; segments: 3)
+ Hash Key: (0 - s.id)
+ -> Seq Scan on simple s
+ -> Hash
+ -> Seq Scan on simple r
+ Optimizer: Postgres query optimizer
+(11 rows)
select count(*) from simple r full outer join simple s on (r.id = 0 - s.id);
count
@@ -1020,11 +989,8 @@ savepoint settings;
set max_parallel_workers_per_gather = 2;
set enable_parallel_hash = on;
set work_mem = '128kB';
-<<<<<<< HEAD
-insert into wide select generate_series(3, 100) as id, rpad('', 320000, 'x') as t;
-=======
set hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+insert into wide select generate_series(3, 100) as id, rpad('', 320000, 'x') as t;
explain (costs off)
select length(max(s.t))
from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id);
@@ -1059,7 +1025,6 @@ $$);
(1 row)
rollback to settings;
-<<<<<<< HEAD
-- If virtualbuckets is much larger than innerndistinct, and
-- outerndistinct is much larger than innerndistinct. Then most
-- tuples of the outer table will match the empty bucket. So when
@@ -1086,7 +1051,6 @@ explain (costs off) select * from join_hash_t_small, join_hash_t_big where a = b
(7 rows)
rollback to settings;
-=======
-- Hash join reuses the HOT status bit to indicate match status. This can only
-- be guaranteed to produce correct results if all the hash join tuple match
-- bits are reset before reuse. This is done upon loading them into the
@@ -1125,7 +1089,6 @@ SELECT * FROM hjtest_matchbits_t1 t1 FULL JOIN hjtest_matchbits_t2 t2 ON t1.id =
(2 rows)
ROLLBACK TO settings;
->>>>>>> REL_16_9
rollback;
-- Verify that hash key expressions reference the correct
-- nodes. Hashjoin's hashkeys need to reference its outer plan, Hash's
@@ -1291,29 +1254,28 @@ select i8.q2, ss.* from
int8_tbl i8,
lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4
on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss;
- QUERY PLAN
------------------------------------------------------------
- Nested Loop
- -> Seq Scan on int8_tbl i8
- -> Sort
- Sort Key: t1.fivethous, i4.f1
- -> Hash Join
- Hash Cond: (t1.fivethous = (i4.f1 + i8.q2))
- -> Seq Scan on tenk1 t1
- -> Hash
- -> Seq Scan on int4_tbl i4
-(9 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3)
+ -> Nested Loop
+ -> Broadcast Motion 3:3 (slice2; segments: 3)
+ -> Seq Scan on int8_tbl i8
+ -> Materialize
+ -> Sort
+ Sort Key: t1.fivethous, i4.f1
+ -> Hash Join
+ Hash Cond: (t1.fivethous = (i4.f1 + i8.q2))
+ -> Seq Scan on tenk1 t1
+ -> Hash
+ -> Broadcast Motion 3:3 (slice3; segments: 3)
+ -> Seq Scan on int4_tbl i4
+ Optimizer: Postgres query optimizer
+(14 rows)
select i8.q2, ss.* from
int8_tbl i8,
lateral (select t1.fivethous, i4.f1 from tenk1 t1 join int4_tbl i4
on t1.fivethous = i4.f1+i8.q2 order by 1,2) ss;
- q2 | fivethous | f1
------+-----------+----
- 456 | 456 | 0
- 456 | 456 | 0
- 123 | 123 | 0
- 123 | 123 | 0
-(4 rows)
-
+ERROR: illegal rescan of motion node: invalid plan (nodeMotion.c:1367) (seg2 slice1 127.0.1.1:7004 pid=1946889) (nodeMotion.c:1367)
+HINT: Likely caused by bad NL-join, try setting enable_nestloop to off
rollback;
diff --git a/src/test/regress/expected/misc_functions.out b/src/test/regress/expected/misc_functions.out
index 68c88d03ab9..df267705595 100644
--- a/src/test/regress/expected/misc_functions.out
+++ b/src/test/regress/expected/misc_functions.out
@@ -292,8 +292,6 @@ SELECT pg_log_backend_memory_contexts(pg_backend_pid());
t
(1 row)
-<<<<<<< HEAD
-=======
SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity
WHERE backend_type = 'checkpointer';
pg_log_backend_memory_contexts
@@ -301,7 +299,6 @@ SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity
t
(1 row)
->>>>>>> REL_16_9
CREATE ROLE regress_log_memory;
SELECT has_function_privilege('regress_log_memory',
'pg_log_backend_memory_contexts(integer)', 'EXECUTE'); -- no
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index eb8655c3b43..4f4b541afe8 100644
--- a/src/test/regress/expected/privileges.out
+++ b/src/test/regress/expected/privileges.out
@@ -42,7 +42,6 @@ CREATE USER regress_priv_user5; -- duplicate
ERROR: role "regress_priv_user5" already exists
CREATE USER regress_priv_user6;
CREATE USER regress_priv_user7;
-CREATE ROLE regress_priv_role;
CREATE USER regress_priv_user8;
CREATE USER regress_priv_user9;
CREATE USER regress_priv_user10;
@@ -393,11 +392,11 @@ UPDATE atest2 SET col2 = true; -- ok
DELETE FROM atest2; -- ok
-- Make sure we are not able to modify system catalogs
UPDATE pg_catalog.pg_class SET relname = '123'; -- fail
-ERROR: permission denied: "pg_class" is a system catalog
+ERROR: permission denied for table pg_class
DELETE FROM pg_catalog.pg_class; -- fail
-ERROR: permission denied: "pg_class" is a system catalog
+ERROR: permission denied for table pg_class
UPDATE pg_toast.pg_toast_1213 SET chunk_id = 1; -- fail
-ERROR: permission denied: "pg_toast_1213" is a system catalog
+ERROR: permission denied for table pg_toast_1213
SET SESSION AUTHORIZATION regress_priv_user3;
SELECT session_user, current_user;
session_user | current_user
@@ -1933,14 +1932,12 @@ ERROR: cannot fire deferred trigger within security-restricted operation
\c -
REFRESH MATERIALIZED VIEW sro_mv;
ERROR: cannot fire deferred trigger within security-restricted operation
-CONTEXT: SQL function "mv_action" statement 1
-BEGIN; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT;
+BEGIN; SET allow_segment_DML = ON; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT;
ERROR: permission denied to grant role "regress_priv_group2"
DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
CONTEXT: SQL function "unwanted_grant" statement 1
SQL statement "SELECT unwanted_grant()"
PL/pgSQL function sro_trojan() line 1 at PERFORM
-SQL function "mv_action" statement 1
-- REFRESH MATERIALIZED VIEW CONCURRENTLY use of eval_const_expressions()
SET SESSION AUTHORIZATION regress_sro_user;
CREATE FUNCTION unwanted_grant_nofail(int) RETURNS int
@@ -2279,7 +2276,7 @@ ROLLBACK;
-- test pg_database_owner
RESET SESSION AUTHORIZATION;
GRANT pg_database_owner TO regress_priv_user1;
-ERROR: role "pg_database_owner" cannot have explicit members
+ERROR: role "pg_database_owner" cannot have explicit members (user.c:3722)
GRANT regress_priv_user1 TO pg_database_owner;
ERROR: role "pg_database_owner" cannot be a member of any role
CREATE TABLE datdba_only ();
@@ -2990,9 +2987,9 @@ ERROR: cannot fire deferred trigger within security-restricted operation
\c -
REFRESH MATERIALIZED VIEW sro_mv;
ERROR: cannot fire deferred trigger within security-restricted operation
-CONTEXT: SQL function "mv_action" statement 1
BEGIN; SET allow_segment_DML = ON; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT;
-ERROR: must have admin option on role "regress_priv_group2"
+ERROR: permission denied to grant role "regress_priv_group2"
+DETAIL: Only roles with the ADMIN option on role "regress_priv_group2" may grant this role.
CONTEXT: SQL function "unwanted_grant" statement 1
SQL statement "SELECT unwanted_grant()"
PL/pgSQL function sro_trojan() line 1 at PERFORM
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index b404d439942..4e32cd84142 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -122,18 +122,18 @@ test: task
test: appendonly_sample
test: aocs_sample
test: groupingsets
-# test: drop_operator
-# test: password
-# test: identity
+test: drop_operator
+test: password
+test: identity
# test: generated
-# test: join_hash
-# test: create_table_like
-# test: alter_generic
-# test: alter_operator
-# test: misc
-# test: async
-# test: dbsize
-# test: misc_functions
+test: join_hash
+test: create_table_like
+test: alter_generic
+test: alter_operator
+test: misc
+test: async
+test: dbsize
+test: misc_functions
# test: sysviews
# test: tsrf
# test: tidscan
diff --git a/src/test/regress/sql/dbsize.sql b/src/test/regress/sql/dbsize.sql
index be63c8023ca..38b94444007 100644
--- a/src/test/regress/sql/dbsize.sql
+++ b/src/test/regress/sql/dbsize.sql
@@ -24,16 +24,12 @@ SELECT size, pg_size_pretty(size), pg_size_pretty(-1 * size) FROM
(10485247::numeric), (10485248::numeric),
(10736893951::numeric), (10736893952::numeric),
(10994579406847::numeric), (10994579406848::numeric),
-<<<<<<< HEAD
- (11258449312612351::numeric), (11258449312612352::numeric)) x(size);
-=======
(11258449312612351::numeric), (11258449312612352::numeric),
(11528652096115048447::numeric), (11528652096115048448::numeric)) x(size);
-- Ensure we get the expected results when passing the extremities of bigint
SELECT pg_size_pretty('-9223372036854775808'::bigint),
pg_size_pretty('9223372036854775807'::bigint);
->>>>>>> REL_16_9
-- pg_size_bytes() tests
SELECT size, pg_size_bytes(size) FROM
diff --git a/src/test/regress/sql/gin.sql b/src/test/regress/sql/gin.sql
index 9dbb31455af..8a98910992e 100644
--- a/src/test/regress/sql/gin.sql
+++ b/src/test/regress/sql/gin.sql
@@ -114,6 +114,7 @@ end;
$$;
-- check number of rows returned by index and removed by recheck
+-- start_ignore
select
query,
js->0->'Plan'->'Plans'->0->'Actual Rows' as "return by index",
@@ -135,6 +136,7 @@ from
lateral explain_query_json($$select * from t_gin_test_tbl where $$ || query) js,
lateral execute_text_query_index($$select string_agg((i, j)::text, ' ') from ( select * from t_gin_test_tbl where $$ || query || $$ order by i ) a$$ ) res_index,
lateral execute_text_query_heap($$select string_agg((i, j)::text, ' ') from ( select * from t_gin_test_tbl where $$ || query || $$ order by i ) a $$ ) res_heap;
+-- end_ignore
reset enable_seqscan;
reset enable_bitmapscan;
diff --git a/src/test/regress/sql/join_hash.sql b/src/test/regress/sql/join_hash.sql
index 7acddef8847..cf5b8e57b13 100644
--- a/src/test/regress/sql/join_hash.sql
+++ b/src/test/regress/sql/join_hash.sql
@@ -153,11 +153,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from simple r join simple s using (id);
select count(*) from simple r join simple s using (id);
@@ -172,11 +169,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join simple s using (id);
@@ -192,11 +186,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '192kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join simple s using (id);
@@ -219,11 +210,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
select count(*) FROM simple r JOIN bigger_than_it_looks s USING (id);
@@ -238,11 +226,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -258,11 +243,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '192kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join bigger_than_it_looks s using (id);
@@ -284,11 +266,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
select count(*) from simple r join extremely_skewed s using (id);
@@ -302,11 +281,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 2;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = off;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
@@ -321,11 +297,8 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 1;
set local work_mem = '128kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set local hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
set local enable_parallel_hash = on;
explain (costs off)
select count(*) from simple r join extremely_skewed s using (id);
@@ -371,11 +344,8 @@ set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -431,11 +401,8 @@ set max_parallel_workers_per_gather = 2;
set enable_material = off;
set enable_mergejoin = off;
set work_mem = '64kB';
-<<<<<<< HEAD
-set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
-=======
set hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+set local statement_mem = '1000kB'; -- GPDB uses statement_mem instead of work_mem
explain (costs off)
select count(*) from join_foo
left join (select b1.id, b1.t from join_bar b1 join join_bar b2 using (id)) ss
@@ -554,11 +521,8 @@ savepoint settings;
set max_parallel_workers_per_gather = 2;
set enable_parallel_hash = on;
set work_mem = '128kB';
-<<<<<<< HEAD
-insert into wide select generate_series(3, 100) as id, rpad('', 320000, 'x') as t;
-=======
set hash_mem_multiplier = 1.0;
->>>>>>> REL_16_9
+insert into wide select generate_series(3, 100) as id, rpad('', 320000, 'x') as t;
explain (costs off)
select length(max(s.t))
from wide left join (select id, coalesce(t, '') || '' as t from wide) s using (id);
@@ -572,7 +536,6 @@ $$
$$);
rollback to settings;
-<<<<<<< HEAD
-- If virtualbuckets is much larger than innerndistinct, and
-- outerndistinct is much larger than innerndistinct. Then most
-- tuples of the outer table will match the empty bucket. So when
@@ -592,9 +555,6 @@ analyze join_hash_t_big;
explain (costs off) select * from join_hash_t_small, join_hash_t_big where a = b;
rollback to settings;
-rollback;
-=======
->>>>>>> REL_16_9
-- Hash join reuses the HOT status bit to indicate match status. This can only
-- be guaranteed to produce correct results if all the hash join tuple match
diff --git a/src/test/regress/sql/misc_functions.sql b/src/test/regress/sql/misc_functions.sql
index 1b3bcb0180b..1c3b9cdb69c 100644
--- a/src/test/regress/sql/misc_functions.sql
+++ b/src/test/regress/sql/misc_functions.sql
@@ -82,12 +82,9 @@ SELECT test_canonicalize_path('./abc/./def/.././ghi/../../../jkl/mno');
SELECT pg_log_backend_memory_contexts(pg_backend_pid());
-<<<<<<< HEAD
-=======
SELECT pg_log_backend_memory_contexts(pid) FROM pg_stat_activity
WHERE backend_type = 'checkpointer';
->>>>>>> REL_16_9
CREATE ROLE regress_log_memory;
SELECT has_function_privilege('regress_log_memory',