This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
new 6f1b5146a76 Fix some answer files
6f1b5146a76 is described below
commit 6f1b5146a7674c00647631c28ff6642c373a965c
Author: Jinbao Chen <[email protected]>
AuthorDate: Fri Dec 19 23:12:13 2025 +0800
Fix some answer files
---
.../pax_storage/src/test/regress/sql/gp_rules.sql | 2 +-
src/test/regress/expected/gp_array_agg.out | 10 +-
src/test/regress/expected/gp_rules.out | 12 +-
src/test/regress/expected/gp_runtime_filter.out | 16 +--
src/test/regress/expected/gp_transactions.out | 6 +-
src/test/regress/expected/incremental_analyze.out | 36 +++--
src/test/regress/expected/olap_group.out | 26 ++--
src/test/regress/expected/olap_window_seq.out | 2 +-
src/test/regress/expected/rangefuncs_cdb.out | 7 +-
src/test/regress/expected/subselect_gp.out | 145 ++++++++++-----------
src/test/regress/expected/transient_types.out | 4 +-
src/test/regress/expected/with_clause.out | 10 +-
src/test/regress/expected/workfile/sort_spill.out | 6 +-
src/test/regress/pg_regress.c | 2 +
src/test/regress/sql/event_trigger_gp.sql | 6 +-
src/test/regress/sql/gp_rules.sql | 2 +-
src/test/regress/sql/gpcopy.sql | 8 +-
src/test/regress/sql/partition_ddl.sql | 8 +-
src/test/regress/sql/segspace.sql | 2 +-
src/test/regress/sql/sreh.sql | 28 ++--
src/test/regress/sql/transient_types.sql | 4 +-
21 files changed, 186 insertions(+), 156 deletions(-)
diff --git a/contrib/pax_storage/src/test/regress/sql/gp_rules.sql b/contrib/pax_storage/src/test/regress/sql/gp_rules.sql
index 8006984a917..d6d722bc673 100644
--- a/contrib/pax_storage/src/test/regress/sql/gp_rules.sql
+++ b/contrib/pax_storage/src/test/regress/sql/gp_rules.sql
@@ -14,7 +14,7 @@ SELECT relkind FROM pg_class
SELECT 1 FROM gp_distribution_policy
WHERE localoid = 'table_to_view_test1'::regclass;
-DROP VIEW table_to_view_test1;
+DROP TABLE table_to_view_test1;
DROP TABLE table_to_view_test2;
-- Same for an Append-Only table. It is currently not supported.
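Note on the gp_rules hunks: the merged PostgreSQL code no longer supports converting a table into a view via a "_RETURN" ON SELECT rule, so the object stays a table and the teardown must use DROP TABLE. A minimal sketch of the new behavior (hypothetical table name):

    CREATE TABLE rule_demo (a int);
    CREATE RULE "_RETURN" AS ON SELECT TO rule_demo
        DO INSTEAD SELECT 1 AS a;
    -- ERROR:  relation "rule_demo" cannot have ON SELECT rules
    -- DETAIL: This operation is not supported for tables.
    DROP TABLE rule_demo;  -- relkind is still 'r', so DROP VIEW would fail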
diff --git a/src/test/regress/expected/gp_array_agg.out b/src/test/regress/expected/gp_array_agg.out
index eccc62eef3b..4ae1f4b0b7d 100644
--- a/src/test/regress/expected/gp_array_agg.out
+++ b/src/test/regress/expected/gp_array_agg.out
@@ -210,16 +210,18 @@ explain (costs off) select * from v_pagg_test order by y;
-> GroupAggregate
Group Key: pagg_test.y
-> Sort
- Sort Key: pagg_test.y
+ Sort Key: pagg_test.y, (((unnest(regexp_split_to_array((string_agg((pagg_test.x)::text, ','::text)), ','::text))))::integer)
-> Result
-> ProjectSet
- -> HashAggregate
+ -> Finalize HashAggregate
Group Key: pagg_test.y
-> Redistribute Motion 3:3 (slice2; segments: 3)
Hash Key: pagg_test.y
- -> Seq Scan on pagg_test
+ -> Partial HashAggregate
+ Group Key: pagg_test.y
+ -> Seq Scan on pagg_test
Optimizer: Postgres query optimizer
-(14 rows)
+(16 rows)
-- Test array_agg(anyarray)
create table int_array_table (a int, arr int[]);
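The new plan above is the two-stage aggregate shape: each segment pre-aggregates, the partial results are redistributed on the group key, and a finalize step combines them. A sketch of the same shape on a hypothetical table (exact plan depends on stats and GUCs):

    create table pagg_demo (y int, x int) distributed randomly;
    explain (costs off) select y, count(*) from pagg_demo group by y;
    -- expected shape:
    --   Finalize HashAggregate
    --     Group Key: y
    --     -> Redistribute Motion 3:3  (Hash Key: y)
    --          -> Partial HashAggregate
    --               Group Key: y
    --               -> Seq Scan on pagg_demo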
diff --git a/src/test/regress/expected/gp_rules.out b/src/test/regress/expected/gp_rules.out
index 3de578b7a03..6df671a172e 100644
--- a/src/test/regress/expected/gp_rules.out
+++ b/src/test/regress/expected/gp_rules.out
@@ -8,12 +8,14 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE rule "_RETURN" as on select to table_to_view_test1
do instead select * from table_to_view_test2;
+ERROR: relation "table_to_view_test1" cannot have ON SELECT rules
+DETAIL: This operation is not supported for tables.
-- relkind has been changed to 'v'
SELECT relkind FROM pg_class
WHERE oid = 'table_to_view_test1'::regclass;
relkind
---------
- v
+ r
(1 row)
-- distribution policy record has been deleted
@@ -21,9 +23,10 @@ SELECT 1 FROM gp_distribution_policy
WHERE localoid = 'table_to_view_test1'::regclass;
?column?
----------
-(0 rows)
+ 1
+(1 row)
-DROP VIEW table_to_view_test1;
+DROP TABLE table_to_view_test1;
DROP TABLE table_to_view_test2;
-- Same for an Append-Only table. It is currently not supported.
CREATE table aotable_to_view_test1 (a int) with (appendonly=true);
@@ -34,6 +37,7 @@ NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE rule "_RETURN" as on select to aotable_to_view_test1
do instead select * from aotable_to_view_test2;
-ERROR: cannot convert non-heap table "aotable_to_view_test1" to a view
+ERROR: relation "aotable_to_view_test1" cannot have ON SELECT rules
+DETAIL: This operation is not supported for tables.
drop table aotable_to_view_test1;
drop table aotable_to_view_test2;
diff --git a/src/test/regress/expected/gp_runtime_filter.out b/src/test/regress/expected/gp_runtime_filter.out
index cc1531a707c..1106001b9c2 100644
--- a/src/test/regress/expected/gp_runtime_filter.out
+++ b/src/test/regress/expected/gp_runtime_filter.out
@@ -438,13 +438,12 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT t1.c3 FROM t1, t2 W
-> Hash Join (actual rows=32 loops=1)
Hash Cond: (t1.c1 = t2.c1)
Extra Text: (seg0) Hash chain length 2.0 avg, 2 max, using 3 of 524288 buckets.
- -> Result (actual rows=16 loops=1)
- -> Seq Scan on t1 (actual rows=24 loops=1)
+ -> Seq Scan on t1 (actual rows=24 loops=1)
-> Hash (actual rows=6 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Seq Scan on t2 (actual rows=6 loops=1)
- Optimizer: Pivotal Optimizer (GPORCA)
-(10 rows)
+ Optimizer: Postgres query optimizer
+(9 rows)
SET gp_enable_runtime_filter_pushdown TO on;
EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT t1.c3 FROM t1, t2 WHERE t1.c1 = t2.c1;
@@ -454,14 +453,13 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF) SELECT t1.c3 FROM t1, t2 W
-> Hash Join (actual rows=32 loops=1)
Hash Cond: (t1.c1 = t2.c1)
Extra Text: (seg0) Hash chain length 2.0 avg, 2 max, using 3 of 524288 buckets.
- -> Result (actual rows=16 loops=1)
- -> Seq Scan on t1 (actual rows=16 loops=1)
- Rows Removed by Pushdown Runtime Filter: 8
+ -> Seq Scan on t1 (actual rows=17 loops=1)
+ Rows Removed by Pushdown Runtime Filter: 7
-> Hash (actual rows=6 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Seq Scan on t2 (actual rows=6 loops=1)
- Optimizer: Pivotal Optimizer (GPORCA)
-(11 rows)
+ Optimizer: Postgres query optimizer
+(10 rows)
RESET gp_enable_runtime_filter_pushdown;
DROP TABLE IF EXISTS t1;
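The gp_runtime_filter hunks capture two things: these plans now come from the Postgres optimizer, and the superfluous Result node above the scan is gone, which shifts the row accounting slightly. With the feature on, the rows dropped during the scan are reported on the scan node itself, as in:

    SET gp_enable_runtime_filter_pushdown TO on;
    EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
    SELECT t1.c3 FROM t1, t2 WHERE t1.c1 = t2.c1;
    --   -> Seq Scan on t1 (actual rows=17 loops=1)
    --        Rows Removed by Pushdown Runtime Filter: 7
    RESET gp_enable_runtime_filter_pushdown;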
diff --git a/src/test/regress/expected/gp_transactions.out b/src/test/regress/expected/gp_transactions.out
index 066abc8269d..48a0574c95e 100644
--- a/src/test/regress/expected/gp_transactions.out
+++ b/src/test/regress/expected/gp_transactions.out
@@ -1,14 +1,16 @@
--
-- Transactions (GPDB-specific tests)
--
+\getenv abs_builddir PG_ABS_BUILDDIR
CREATE TEMPORARY TABLE temptest (a int);
INSERT INTO temptest VALUES (generate_series(1, 10));
CREATE TEMPORARY SEQUENCE tempseq;
SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY;
-- Make sure COPY works with temp tables during a READ ONLY transaction.
-COPY temptest TO '@abs_builddir@/results/xacttemp.data';
+\set filename :abs_builddir '/results/xacttemp.data'
+COPY temptest TO :'filename';
DELETE FROM temptest;
-COPY temptest FROM '@abs_builddir@/results/xacttemp.data';
+COPY temptest FROM :'filename';
SELECT * from temptest;
a
----
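This hunk shows the pattern used throughout the patch: instead of the old @abs_builddir@ substitution performed by pg_regress, the test reads the path from the environment at run time. The psql mechanics, briefly: \getenv copies an environment variable into a psql variable, adjacent values in \set are concatenated, and :'var' interpolates the value as a single-quoted SQL literal:

    \getenv abs_builddir PG_ABS_BUILDDIR
    \set filename :abs_builddir '/results/xacttemp.data'
    COPY temptest TO :'filename';  -- expands to COPY ... TO '<builddir>/results/xacttemp.data'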
diff --git a/src/test/regress/expected/incremental_analyze.out
b/src/test/regress/expected/incremental_analyze.out
index 99646363b17..fd91f7442b8 100644
--- a/src/test/regress/expected/incremental_analyze.out
+++ b/src/test/regress/expected/incremental_analyze.out
@@ -1961,9 +1961,11 @@ HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sur
INSERT INTO foo values (5),(15);
ANALYZE (verbose, rootpartition off) foo;
INFO: analyzing "public.foo_1_prt_2"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(229792, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_2" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215158, 400, 'f');
INFO: analyzing "public.foo_1_prt_1"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(229789, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_1" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215155, 400, 'f');
-- root should not have stats
SELECT count(*) from pg_statistic where starelid='foo'::regclass;
count
@@ -1985,16 +1987,20 @@ ANALYZE (verbose, fullscan on) foo;
INFO: analyzing "public.foo_1_prt_2"
INFO: Executing SQL: select pg_catalog.gp_hyperloglog_accum(a) from public.foo_1_prt_2 as Ta
INFO: HLL FULL SCAN
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(229792, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_2" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215158, 400, 'f');
INFO: analyzing "public.foo_1_prt_1"
INFO: Executing SQL: select pg_catalog.gp_hyperloglog_accum(a) from public.foo_1_prt_1 as Ta
INFO: HLL FULL SCAN
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(229789, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_1" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215155, 400, 'f');
ANALYZE (verbose, fullscan off) foo;
INFO: analyzing "public.foo_1_prt_2"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(229792, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_2" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215158, 400, 'f');
INFO: analyzing "public.foo_1_prt_1"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(229789, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_1" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215155, 400, 'f');
reset optimizer_analyze_root_partition;
-- Test merging of stats after the last partition is analyzed. Merging should
-- be done for root without taking a sample from root if one of the column
@@ -2008,10 +2014,12 @@ ALTER TABLE foo ALTER COLUMN c SET STATISTICS 0;
INSERT INTO foo SELECT i,i%2+1, NULL FROM generate_series(1,100)i;
ANALYZE VERBOSE foo_1_prt_1;
INFO: analyzing "public.foo_1_prt_1"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(24965, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_1" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215164, 400, 'f');
ANALYZE VERBOSE foo_1_prt_2;
INFO: analyzing "public.foo_1_prt_2"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(24971, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_2" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215169, 400, 'f');
INFO: analyzing "public.foo" inheritance tree
SELECT tablename, attname, null_frac, n_distinct, most_common_vals, most_common_freqs, histogram_bounds FROM pg_stats WHERE tablename like 'foo%' ORDER BY attname,tablename;
tablename | attname | null_frac | n_distinct | most_common_vals | most_common_freqs | histogram_bounds
@@ -2031,11 +2039,14 @@ create table foo (a int, b date) distributed by (a) partition by range(b) (parti
insert into foo select a, '20210101'::date+a from (select generate_series(1,80) a) t1;
analyze verbose foo;
INFO: analyzing "public.foo_1_prt_20210301"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(65906, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_20210301" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215183, 400, 'f');
INFO: analyzing "public.foo_1_prt_20210201"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(65903, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_20210201" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215180, 400, 'f');
INFO: analyzing "public.foo_1_prt_20210101"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(65900, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_20210101" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215177, 400, 'f');
INFO: analyzing "public.foo" inheritance tree
-- we should see "analyzing "public.foo" inheritance tree" in the output below
begin;
@@ -2043,6 +2054,7 @@ truncate foo_1_prt_20210201;
insert into foo select a, '20210101'::date+a from (select generate_series(31,40) a) t1;
analyze verbose foo_1_prt_20210201;
INFO: analyzing "public.foo_1_prt_20210201"
-INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(65903, 400, 'f');
+INFO: skipping analyze of "public.foo_1_prt_20210201" inheritance tree --- this inheritance tree contains no child tables
+INFO: Executing SQL: select pg_catalog.gp_acquire_sample_rows(215180, 400, 'f');
INFO: analyzing "public.foo" inheritance tree
rollback;
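The recurring new INFO line throughout incremental_analyze appears to come from the merged PostgreSQL code, which now also reports on the (childless) inheritance tree when a leaf partition is analyzed directly; the OIDs inside the gp_acquire_sample_rows() calls are simply refreshed expected values. The expected sequence for a leaf now reads roughly:

    analyze verbose foo_1_prt_20210201;
    -- INFO:  analyzing "public.foo_1_prt_20210201"
    -- INFO:  skipping analyze of "public.foo_1_prt_20210201" inheritance tree ---
    --        this inheritance tree contains no child tables
    -- INFO:  analyzing "public.foo" inheritance tree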
diff --git a/src/test/regress/expected/olap_group.out b/src/test/regress/expected/olap_group.out
index 01d91d60324..4c5f2361242 100755
--- a/src/test/regress/expected/olap_group.out
+++ b/src/test/regress/expected/olap_group.out
@@ -2877,11 +2877,11 @@ create view cube_view as select cn,vn,grouping(vn,cn) from sale group by cube(cn
vn | integer | | | | plain |
grouping | integer | | | | plain |
View definition:
- SELECT sale.cn,
- sale.vn,
- GROUPING(sale.vn, sale.cn) AS "grouping"
+ SELECT cn,
+ vn,
+ GROUPING(vn, cn) AS "grouping"
FROM sale
- GROUP BY CUBE(sale.cn, sale.vn);
+ GROUP BY CUBE(cn, vn);
create view rollup_view as select cn,vn,pn,grouping(cn,vn,pn) from sale group by rollup(cn),vn,pn;
\d+ rollup_view;
@@ -2893,12 +2893,12 @@ create view rollup_view as select cn,vn,pn,grouping(cn,vn,pn) from sale group by
pn | integer | | | | plain |
grouping | integer | | | | plain |
View definition:
- SELECT sale.cn,
- sale.vn,
- sale.pn,
- GROUPING(sale.cn, sale.vn, sale.pn) AS "grouping"
+ SELECT cn,
+ vn,
+ pn,
+ GROUPING(cn, vn, pn) AS "grouping"
FROM sale
- GROUP BY ROLLUP(sale.cn), sale.vn, sale.pn;
+ GROUP BY ROLLUP(cn), vn, pn;
create view gs_view as select cn,vn,grouping(vn,cn) from sale group by grouping sets ((vn), (cn), (), (cn,vn));
\d+ gs_view;
@@ -2909,11 +2909,11 @@ create view gs_view as select cn,vn,grouping(vn,cn) from sale group by grouping
vn | integer | | | | plain |
grouping | integer | | | | plain |
View definition:
- SELECT sale.cn,
- sale.vn,
- GROUPING(sale.vn, sale.cn) AS "grouping"
+ SELECT cn,
+ vn,
+ GROUPING(vn, cn) AS "grouping"
FROM sale
- GROUP BY GROUPING SETS ((sale.vn), (sale.cn), (), (sale.cn, sale.vn));
+ GROUP BY GROUPING SETS ((vn), (cn), (), (cn, vn));
-- GROUP_ID function --
select pn, sum(qty), group_id() from sale group by rollup(pn);
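The olap_group and olap_window_seq hunks are all one deparse change: when a view's query references just one relation, newer pg_get_viewdef/\d+ output leaves the column references unqualified. Using this test's sale table:

    create view cube_view as
        select cn, vn, grouping(vn, cn) from sale group by cube(cn, vn);
    select pg_get_viewdef('cube_view'::regclass, true);
    -- now prints SELECT cn, vn, ... GROUP BY CUBE(cn, vn), with no "sale." prefix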
diff --git a/src/test/regress/expected/olap_window_seq.out b/src/test/regress/expected/olap_window_seq.out
index ae90f5b3c38..37e0fa880fa 100755
--- a/src/test/regress/expected/olap_window_seq.out
+++ b/src/test/regress/expected/olap_window_seq.out
@@ -8912,7 +8912,7 @@ create view distinct_windowagg_view as select sum(distinct g/2) OVER (partition
--------+--------+-----------+----------+---------+---------+-------------
sum | bigint | | | | plain |
View definition:
- SELECT sum(DISTINCT g.g / 2) OVER (PARTITION BY (g.g / 4)) AS sum
+ SELECT sum(DISTINCT g / 2) OVER (PARTITION BY (g / 4)) AS sum
FROM generate_series(1, 5) g(g);
-- These are tests for pushing down filter predicates in window functions.
diff --git a/src/test/regress/expected/rangefuncs_cdb.out b/src/test/regress/expected/rangefuncs_cdb.out
index 0bb3ec5ca36..f21b9a9d8fa 100644
--- a/src/test/regress/expected/rangefuncs_cdb.out
+++ b/src/test/regress/expected/rangefuncs_cdb.out
@@ -30,10 +30,11 @@ name not in ('enable_parallel',
enable_partitionwise_aggregate | off
enable_partitionwise_join | off
enable_password_profile | on
+ enable_presorted_aggregate | on
enable_seqscan | on
enable_sort | on
enable_tidscan | on
-(23 rows)
+(24 rows)
-- start_ignore
create schema rangefuncs_cdb;
@@ -197,7 +198,7 @@ $$ LANGUAGE plpgsql;
-- function in select clause
-- Fails: plpgsql does not support SFRM_Materialize
select foor(1);
-ERROR: set-valued function called in context that cannot accept a set
+ERROR: materialize mode required, but it is not allowed in this context
CONTEXT: PL/pgSQL function foor(integer) line 6 at RETURN NEXT
-- expanding columns in the select list
-- Fails: record type not registered
@@ -220,7 +221,7 @@ select * from foor(1) as (fooid int, f2 int);
-- function over function (executed on master)
-- Fails: plpgsql does not support SFRM_Materialize
select foor(fooid), * from foor(3) as (fooid int, f2 int);
-ERROR: set-valued function called in context that cannot accept a set
+ERROR: materialize mode required, but it is not allowed in this context
CONTEXT: PL/pgSQL function foor(integer) line 6 at RETURN NEXT
-- Joining with a table
select * from foo2, foor(3) z(fooid int, f2 int) where foo2.f2 = z.f2;
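Both message updates in rangefuncs_cdb are the same upstream rewording: when a set-returning function needs a tuplestore (materialize mode) and the calling context cannot supply one, the error now names the real limitation. With the test's own function:

    select foor(1);
    -- old: ERROR: set-valued function called in context that cannot accept a set
    -- new: ERROR: materialize mode required, but it is not allowed in this context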
diff --git a/src/test/regress/expected/subselect_gp.out b/src/test/regress/expected/subselect_gp.out
index b62bf04d803..36f064c8003 100644
--- a/src/test/regress/expected/subselect_gp.out
+++ b/src/test/regress/expected/subselect_gp.out
@@ -927,18 +927,17 @@ select * from csq_pullup t0 where 1= (select count(*) from csq_pullup t1 where t
-- text, text
--
explain select * from csq_pullup t0 where not exists (select 1 from csq_pullup t1 where t0.t=t1.t and t1.i = 1);
- QUERY PLAN
--------------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3) (cost=1.02..2.07 rows=4 width=19)
- -> Hash Anti Join (cost=1.02..2.07 rows=2 width=19)
- Hash Cond: t0.t = t1.t
- -> Seq Scan on csq_pullup t0 (cost=0.00..1.01 rows=1 width=19)
- -> Hash (cost=1.01..1.01 rows=1 width=4)
- -> Seq Scan on csq_pullup t1 (cost=0.00..1.01 rows=1 width=4)
- Filter: t IS NOT NULL AND i = 1
- Settings: optimizer_segments=3
- Optimizer status: Postgres query optimizer
-(9 rows)
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3) (cost=1.02..2.09 rows=3 width=17)
+ -> Hash Right Anti Join (cost=1.02..2.05 rows=1 width=17)
+ Hash Cond: (t1.t = t0.t)
+ -> Seq Scan on csq_pullup t1 (cost=0.00..1.01 rows=1 width=4)
+ Filter: (i = 1)
+ -> Hash (cost=1.01..1.01 rows=1 width=17)
+ -> Seq Scan on csq_pullup t0 (cost=0.00..1.01 rows=1 width=17)
+ Optimizer: Postgres query optimizer
+(8 rows)
select * from csq_pullup t0 where not exists (select 1 from csq_pullup t1 where t0.t=t1.t and t1.i = 1);
t | n | i | v
@@ -951,19 +950,17 @@ select * from csq_pullup t0 where not exists (select 1 from csq_pullup t1 where
-- int, function(int)
--
explain select * from csq_pullup t0 where not exists (select 1 from csq_pullup t1 where t0.i=t1.i + 1);
- QUERY PLAN
--------------------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice2; segments: 3) (cost=1.09..2.15 rows=4 width=19)
- -> Hash Anti Join (cost=1.09..2.15 rows=2 width=19)
- Hash Cond: t0.i = (t1.i + 1)
- -> Seq Scan on csq_pullup t0 (cost=0.00..1.01 rows=1 width=19)
- -> Hash (cost=1.05..1.05 rows=1 width=4)
- -> Broadcast Motion 3:3 (slice1; segments: 3) (cost=0.00..1.05 rows=1 width=4)
- -> Seq Scan on csq_pullup t1 (cost=0.00..1.01 rows=1 width=4)
- Filter: (i + 1) IS NOT NULL
- Settings: optimizer_segments=3
- Optimizer status: Postgres query optimizer
-(10 rows)
+ QUERY PLAN
+-------------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3) (cost=1.02..2.11 rows=3 width=17)
+ -> Hash Right Anti Join (cost=1.02..2.07 rows=1 width=17)
+ Hash Cond: ((t1.i + 1) = t0.i)
+ -> Broadcast Motion 3:3 (slice2; segments: 3) (cost=0.00..1.03 rows=1 width=4)
+ -> Seq Scan on csq_pullup t1 (cost=0.00..1.01 rows=1 width=4)
+ -> Hash (cost=1.01..1.01 rows=1 width=17)
+ -> Seq Scan on csq_pullup t0 (cost=0.00..1.01 rows=1 width=17)
+ Optimizer: Postgres query optimizer
+(8 rows)
select * from csq_pullup t0 where not exists (select 1 from csq_pullup t1 where t0.i=t1.i + 1);
t | n | i | v
@@ -987,15 +984,14 @@ analyze subselect_t2;
explain select * from subselect_t1 where x in (select y from subselect_t2);
QUERY PLAN
------------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3) (cost=1.07..2.13 rows=4 width=4)
- -> Hash Semi Join (cost=1.07..2.13 rows=2 width=4)
- Hash Cond: subselect_t1.x = subselect_t2.y
- -> Seq Scan on subselect_t1 (cost=0.00..1.02 rows=1 width=4)
- -> Hash (cost=1.03..1.03 rows=1 width=4)
- -> Seq Scan on subselect_t2 (cost=0.00..1.03 rows=1 width=4)
- Settings: optimizer_segments=3
- Optimizer status: Postgres query optimizer
-(8 rows)
+ Gather Motion 3:1 (slice1; segments: 3) (cost=1.02..2.09 rows=3 width=4)
+ -> Hash Semi Join (cost=1.02..2.05 rows=1 width=4)
+ Hash Cond: (subselect_t1.x = subselect_t2.y)
+ -> Seq Scan on subselect_t1 (cost=0.00..1.01 rows=1 width=4)
+ -> Hash (cost=1.01..1.01 rows=1 width=4)
+ -> Seq Scan on subselect_t2 (cost=0.00..1.01 rows=1 width=4)
+ Optimizer: Postgres query optimizer
+(7 rows)
select * from subselect_t1 where x in (select y from subselect_t2);
x
@@ -1010,14 +1006,14 @@ select * from subselect_t1 where x in (select y from subselect_t2);
explain select * from subselect_t1 where x in (select y from subselect_t2 union all select y from subselect_t2);
QUERY PLAN
---------------------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice1; segments: 3) (cost=4.19..6.26 rows=4 width=4)
- -> Hash Semi Join (cost=4.19..6.26 rows=2 width=4)
- Hash Cond: subselect_t1.x = subselect_t2.y
- -> Seq Scan on subselect_t1 (cost=0.00..2.02 rows=1 width=4)
- -> Hash (cost=4.12..4.12 rows=2 width=4)
- -> Append (cost=0.00..4.06 rows=2 width=4)
- -> Seq Scan on subselect_t2 (cost=0.00..2.03 rows=1 width=4)
- -> Seq Scan on subselect_t2 subselect_t2_1 (cost=0.00..2.03 rows=1 width=4)
+ Gather Motion 3:1 (slice1; segments: 3) (cost=2.05..3.12 rows=3 width=4)
+ -> Hash Semi Join (cost=2.05..3.08 rows=1 width=4)
+ Hash Cond: (subselect_t1.x = subselect_t2.y)
+ -> Seq Scan on subselect_t1 (cost=0.00..1.01 rows=1 width=4)
+ -> Hash (cost=2.03..2.03 rows=2 width=4)
+ -> Append (cost=0.00..2.03 rows=2 width=4)
+ -> Seq Scan on subselect_t2 (cost=0.00..1.01 rows=1 width=4)
+ -> Seq Scan on subselect_t2 subselect_t2_1 (cost=0.00..1.01 rows=1 width=4)
Optimizer: Postgres query optimizer
(9 rows)
@@ -1580,19 +1576,19 @@ EXPLAIN SELECT '' AS three, f1, f2
FROM SUBSELECT_TBL
WHERE (f1, f2) NOT IN (SELECT f2, CAST(f3 AS int4) FROM SUBSELECT_TBL
WHERE f3 IS NOT NULL) ORDER BY 2,3;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------
- Gather Motion 3:1 (slice2; segments: 3) (cost=10000000009.64..10000000009.64 rows=4 width=8)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------
+ Gather Motion 3:1 (slice1; segments: 3) (cost=10000000004.35..10000000004.40 rows=3 width=40)
Merge Key: subselect_tbl.f1, subselect_tbl.f2
- -> Sort (cost=10000000009.64..10000000009.64 rows=2 width=8)
+ -> Sort (cost=10000000004.35..10000000004.35 rows=1 width=40)
Sort Key: subselect_tbl.f1, subselect_tbl.f2
- -> Nested Loop Left Anti Semi (Not-In) Join (cost=10000000000.00..10000000009.61 rows=2 width=8)
- Join Filter: subselect_tbl.f1 = subselect_tbl_1.f2 AND subselect_tbl.f2 = subselect_tbl_1.f3::integer
- -> Seq Scan on subselect_tbl (cost=0.00..3.08 rows=3 width=8)
- -> Materialize (cost=0.00..3.47 rows=7 width=12)
- -> Broadcast Motion 3:3 (slice1; segments: 3) (cost=0.00..3.36 rows=7 width=12)
- -> Seq Scan on subselect_tbl subselect_tbl_1 (cost=0.00..3.08 rows=3 width=12)
- Filter: f3 IS NOT NULL
+ -> Nested Loop Left Anti Semi (Not-In) Join (cost=10000000000.00..10000000004.34 rows=1 width=40)
+ Join Filter: ((subselect_tbl.f1 = subselect_tbl_1.f2) AND (subselect_tbl.f2 = (subselect_tbl_1.f3)::integer))
+ -> Seq Scan on subselect_tbl (cost=0.00..1.03 rows=3 width=8)
+ -> Materialize (cost=0.00..1.15 rows=7 width=12)
+ -> Broadcast Motion 3:3 (slice2; segments: 3) (cost=0.00..1.12 rows=7 width=12)
+ -> Seq Scan on subselect_tbl subselect_tbl_1 (cost=0.00..1.03 rows=2 width=12)
+ Filter: (f3 IS NOT NULL)
Optimizer: Postgres query optimizer
(12 rows)
@@ -1630,15 +1626,14 @@ EXPLAIN SELECT '' AS six, f1 AS "Correlated Field", f2 AS "Second Field"
Merge Key: upper.f1, upper.f2
-> Sort (cost=2.10..2.11 rows=2 width=8)
Sort Key: upper.f1, upper.f2
- -> Hash Semi Join (cost=3.11..6.25 rows=2 width=8)
- Hash Cond: upper.f1 = subselect_tbl.f1
- -> Seq Scan on subselect_tbl upper (cost=0.00..1.01 rows=1 width=8)
- -> Hash (cost=1.01..1.01 rows=1 width=8)
- -> Seq Scan on subselect_tbl (cost=0.00..1.01 rows=1 width=8)
- Filter: f1 = f2
- Settings: optimizer=off
- Optimizer status: Postgres query optimizer
-(12 rows)
+ -> Hash Semi Join (cost=1.05..2.09 rows=1 width=40)
+ Hash Cond: (upper.f1 = subselect_tbl.f1)
+ -> Seq Scan on subselect_tbl upper (cost=0.00..1.03 rows=3 width=8)
+ -> Hash (cost=1.03..1.03 rows=1 width=8)
+ -> Seq Scan on subselect_tbl (cost=0.00..1.03 rows=1 width=8)
+ Filter: (f1 = f2)
+ Optimizer: Postgres query optimizer
+(11 rows)
EXPLAIN SELECT '' AS six, f1 AS "Correlated Field", f3 AS "Second Field"
FROM SUBSELECT_TBL upper
@@ -1650,11 +1645,11 @@ EXPLAIN SELECT '' AS six, f1 AS "Correlated Field", f3 AS "Second Field"
Merge Key: upper.f1, upper.f3
-> Sort (cost=2.13..2.14 rows=2 width=12)
Sort Key: upper.f1, upper.f3
- -> Hash Semi Join (cost=3.36..6.52 rows=2 width=12)
- Hash Cond: upper.f1 = subselect_tbl.f2 AND upper.f2::double precision = subselect_tbl.f3
- -> Seq Scan on subselect_tbl upper (cost=0.00..1.01 rows=1 width=16)
- -> Hash (cost=1.03..1.03 rows=1 width=12)
- -> Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..1.03 rows=1 width=12)
+ -> Hash Semi Join (cost=1.12..2.19 rows=2 width=44)
+ Hash Cond: (((upper.f2)::double precision = subselect_tbl.f3) AND (upper.f1 = subselect_tbl.f2))
+ -> Seq Scan on subselect_tbl upper (cost=0.00..1.03 rows=3 width=16)
+ -> Hash (cost=1.08..1.08 rows=3 width=12)
+ -> Redistribute Motion 3:3 (slice2; segments: 3) (cost=0.00..1.08 rows=3 width=12)
Hash Key: subselect_tbl.f2
-> Seq Scan on subselect_tbl (cost=0.00..1.01 rows=1 width=12)
Settings: optimizer=off
@@ -1677,7 +1672,7 @@ EXPLAIN SELECT '' AS six, f1 AS "Correlated Field", f3 AS "Second Field"
-> Redistribute Motion 3:3 (slice2; segments: 3) (cost=10000000000.00..10000000002.36 rows=1 width=12)
Hash Key: (RowIdExpr)
-> Nested Loop (cost=10000000000.00..10000000002.34 rows=1 width=12)
- Join Filter: (((upper.f1 + subselect_tbl.f2))::double precision = upper.f3)
+ Join Filter: (upper.f3 = ((upper.f1 + subselect_tbl.f2))::double precision)
-> Broadcast Motion 3:3 (slice3; segments: 3) (cost=0.00..1.13 rows=8 width=12)
-> Seq Scan on subselect_tbl upper (cost=0.00..1.03 rows=3 width=12)
-> Materialize (cost=0.00..1.04 rows=1 width=4)
@@ -1696,14 +1691,14 @@ EXPLAIN SELECT '' AS five, f1 AS "Correlated Field"
Merge Key: subselect_tbl.f1
-> Sort (cost=6.67..6.69 rows=3 width=4)
Sort Key: subselect_tbl.f1
- -> Hash Semi Join (cost=3.33..6.55 rows=3 width=4)
- Hash Cond: subselect_tbl.f1 = subselect_tbl_1.f2 AND subselect_tbl.f2 = subselect_tbl_1.f3::integer
- -> Seq Scan on subselect_tbl (cost=0.00..3.08 rows=3 width=8)
- -> Hash (cost=3.22..3.22 rows=3 width=12)
- -> Redistribute Motion 3:3 (slice1; segments: 3) (cost=0.00..3.22 rows=3 width=12)
+ -> Hash Semi Join (cost=1.11..2.18 rows=2 width=36)
+ Hash Cond: ((subselect_tbl.f1 = subselect_tbl_1.f2) AND (subselect_tbl.f2 = (subselect_tbl_1.f3)::integer))
+ -> Seq Scan on subselect_tbl (cost=0.00..1.03 rows=3 width=8)
+ -> Hash (cost=1.07..1.07 rows=2 width=12)
+ -> Redistribute Motion 3:3 (slice2; segments: 3) (cost=0.00..1.07 rows=2 width=12)
Hash Key: subselect_tbl_1.f2
- -> Seq Scan on subselect_tbl subselect_tbl_1 (cost=0.00..3.08 rows=3 width=12)
- Filter: f3 IS NOT NULL
+ -> Seq Scan on subselect_tbl subselect_tbl_1 (cost=0.00..1.03 rows=2 width=12)
+ Filter: (f3 IS NOT NULL)
Optimizer: Postgres query optimizer
(13 rows)
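Most of the subselect_gp churn is the planner now emitting "Hash Right Anti Join", a newer upstream join variant: the hash table is built on the side whose unmatched rows are returned, the NOT EXISTS subquery drives the probe, and the old "t IS NOT NULL" helper filters disappear. Reusing the test's query:

    explain select * from csq_pullup t0
    where not exists (select 1 from csq_pullup t1 where t0.t = t1.t and t1.i = 1);
    --   -> Hash Right Anti Join
    --        Hash Cond: (t1.t = t0.t)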
diff --git a/src/test/regress/expected/transient_types.out b/src/test/regress/expected/transient_types.out
index 6a2846796ca..ae994fa312a 100644
--- a/src/test/regress/expected/transient_types.out
+++ b/src/test/regress/expected/transient_types.out
@@ -300,8 +300,10 @@ drop table t;
-- This test case use UDF assign_new_record() to generate a new record type in cache
-- for each tuple, these types should be sync to receiver.
-- test function
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set regress_dll :abs_builddir '/regress.so'
create or replace function assign_new_record()
-returns SETOF record as '@abs_builddir@/regress@DLSUFFIX@',
+returns SETOF record as :'regress_dll',
'assign_new_record' LANGUAGE C VOLATILE;
-- transfer record types via motion incrementally
select assign_new_record() from gp_dist_random('gp_id');
diff --git a/src/test/regress/expected/with_clause.out b/src/test/regress/expected/with_clause.out
index 2e574f1a146..49267ad8320 100644
--- a/src/test/regress/expected/with_clause.out
+++ b/src/test/regress/expected/with_clause.out
@@ -1025,7 +1025,7 @@ SELECT pg_get_viewdef('my_view'::regclass);
SELECT sum(with_test1.value) AS sum+
FROM with_test1 +
) +
- SELECT my_sum.total +
+ SELECT total +
FROM my_sum;
(1 row)
@@ -1036,7 +1036,7 @@ SELECT pg_get_viewdef('my_view'::regclass, true);
SELECT sum(with_test1.value) AS sum+
FROM with_test1 +
) +
- SELECT my_sum.total +
+ SELECT total +
FROM my_sum;
(1 row)
@@ -1059,7 +1059,7 @@ SELECT pg_get_viewdef('my_view'::regclass);
SELECT sum(my_group_sum.total) AS sum +
FROM my_group_sum +
) +
- SELECT my_sum.total +
+ SELECT total +
FROM my_sum;
(1 row)
@@ -1075,7 +1075,7 @@ SELECT pg_get_viewdef('my_view'::regclass, true);
SELECT sum(my_group_sum.total) AS sum +
FROM my_group_sum +
) +
- SELECT my_sum.total +
+ SELECT total +
FROM my_sum;
(1 row)
@@ -2315,7 +2315,7 @@ UNION ALL
Gather Motion 3:1 (slice1; segments: 3)
-> Append
-> HashAggregate
- Group Key: 'a'::text, cte.j
+ Group Key: cte.j
-> Redistribute Motion 3:3 (slice2; segments: 3)
Hash Key: 'a'::text, cte.j
-> Subquery Scan on cte
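The last with_clause hunk reflects grouping-key pruning: the planner can now drop a grouping column it proves constant ('a'::text here), so the displayed Group Key shrinks to cte.j while the Redistribute Motion still hashes on both expressions. Schematically, on a hypothetical table t:

    explain (costs off)
    with cte as (select j from t)
    select 'a'::text, j from cte group by 1, 2;
    -- HashAggregate
    --   Group Key: cte.j    (the constant is pruned)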
diff --git a/src/test/regress/expected/workfile/sort_spill.out b/src/test/regress/expected/workfile/sort_spill.out
index e0a933db32c..70c9aa7b031 100644
--- a/src/test/regress/expected/workfile/sort_spill.out
+++ b/src/test/regress/expected/workfile/sort_spill.out
@@ -42,8 +42,7 @@ select avg(i2) from (select i1,i2 from testsort order by i2) foo;
select * from sort_spill.is_workfile_created('explain (analyze, verbose) select i1,i2 from testsort order by i2;');
is_workfile_created
---------------------
- 1
-(1 row)
+(0 rows)
select * from sort_spill.is_workfile_created('explain (analyze, verbose) select i1,i2 from testsort order by i2 limit 50000;');
is_workfile_created
@@ -60,8 +59,7 @@ select avg(i2) from (select i1,i2 from testsort order by i2) foo;
select * from sort_spill.is_workfile_created('explain (analyze, verbose) select i1,i2 from testsort order by i2;');
is_workfile_created
---------------------
- 1
-(1 row)
+(0 rows)
select * from sort_spill.is_workfile_created('explain (analyze, verbose) select i1,i2 from testsort order by i2 limit 50000;');
is_workfile_created
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index f8bba89b22e..f1068d15c17 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -1287,6 +1287,8 @@ initialize_environment(void)
setenv("PG_LIBDIR", dlpath, 1);
setenv("PG_DLSUFFIX", DLSUFFIX, 1);
setenv("PG_CURUSERNAME", get_user_name(&errstr), 1);
+ setenv("PG_BINDDIR", bindir, 1);
+ setenv("PG_HOSTNAME", get_host_name(is_singlenode_mode ? -1 : 0, 'p'),
1);
if (nolocale)
{
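These two setenv() calls are the producer half of the \getenv pattern used by the SQL changes in this commit: pg_regress exports the values, and the tests read them back at run time, e.g. (from sreh.sql below):

    \getenv binddir PG_BINDDIR
    \getenv hostname PG_HOSTNAME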
diff --git a/src/test/regress/sql/event_trigger_gp.sql b/src/test/regress/sql/event_trigger_gp.sql
index fd7aa22892a..5b3aaec7f59 100644
--- a/src/test/regress/sql/event_trigger_gp.sql
+++ b/src/test/regress/sql/event_trigger_gp.sql
@@ -1,3 +1,5 @@
+\getenv libdir PG_LIBDIR
+\set gpextprotocol :libdir '/gpextprotocol.so'
create or replace function test_event_trigger() returns event_trigger as $$
BEGIN
RAISE NOTICE 'test_event_trigger: % %', tg_event, tg_tag;
@@ -11,8 +13,8 @@ create event trigger regress_event_trigger on ddl_command_start
CREATE EXTERNAL WEB TABLE echotest (x text) EXECUTE 'echo foo;' FORMAT 'text';
DROP EXTERNAL TABLE echotest;
-CREATE OR REPLACE FUNCTION write_to_file() RETURNS integer as '$libdir/gpextprotocol.so', 'demoprot_export' LANGUAGE C STABLE NO SQL;
-CREATE OR REPLACE FUNCTION read_from_file() RETURNS integer as '$libdir/gpextprotocol.so', 'demoprot_import' LANGUAGE C STABLE NO SQL;
+CREATE OR REPLACE FUNCTION write_to_file() RETURNS integer as :'gpextprotocol', 'demoprot_export' LANGUAGE C STABLE NO SQL;
+CREATE OR REPLACE FUNCTION read_from_file() RETURNS integer as :'gpextprotocol', 'demoprot_import' LANGUAGE C STABLE NO SQL;
CREATE PROTOCOL demoprot_event_trig_test (readfunc = 'read_from_file', writefunc = 'write_to_file');
diff --git a/src/test/regress/sql/gp_rules.sql b/src/test/regress/sql/gp_rules.sql
index 8006984a917..d6d722bc673 100644
--- a/src/test/regress/sql/gp_rules.sql
+++ b/src/test/regress/sql/gp_rules.sql
@@ -14,7 +14,7 @@ SELECT relkind FROM pg_class
SELECT 1 FROM gp_distribution_policy
WHERE localoid = 'table_to_view_test1'::regclass;
-DROP VIEW table_to_view_test1;
+DROP TABLE table_to_view_test1;
DROP TABLE table_to_view_test2;
-- Same for an Append-Only table. It is currently not supported.
diff --git a/src/test/regress/sql/gpcopy.sql b/src/test/regress/sql/gpcopy.sql
index 3328c966273..50cafa49938 100644
--- a/src/test/regress/sql/gpcopy.sql
+++ b/src/test/regress/sql/gpcopy.sql
@@ -944,7 +944,9 @@ CREATE TABLE LINEITEM_6 (LIKE LINEITEM);
CREATE TABLE LINEITEM_7 (LIKE LINEITEM);
CREATE TABLE LINEITEM_8 (LIKE LINEITEM);
-COPY LINEITEM FROM '@abs_srcdir@/data/lineitem.csv' WITH DELIMITER '|' CSV;
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set lineitem_csv :abs_srcdir '/data/lineitem.csv'
+COPY LINEITEM FROM :'lineitem_csv' WITH DELIMITER '|' CSV;
ANALYZE LINEITEM;
SELECT COUNT(*) FROM LINEITEM;
COPY LINEITEM TO '/tmp/lineitem.csv' CSV;
@@ -1264,10 +1266,12 @@ CREATE FUNCTION broken_int4in(cstring)
RETURNS broken_int4
AS 'int4in'
LANGUAGE internal IMMUTABLE STRICT;
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set regress_dll :abs_builddir '/regress.so'
CREATE FUNCTION broken_int4out(broken_int4)
RETURNS cstring
- AS '@abs_builddir@/regress@DLSUFFIX@', 'broken_int4out'
+ AS :'regress_dll', 'broken_int4out'
LANGUAGE C IMMUTABLE STRICT;
CREATE TYPE broken_int4 (
diff --git a/src/test/regress/sql/partition_ddl.sql b/src/test/regress/sql/partition_ddl.sql
index 5c41027b1aa..979359818fc 100644
--- a/src/test/regress/sql/partition_ddl.sql
+++ b/src/test/regress/sql/partition_ddl.sql
@@ -626,7 +626,9 @@ subpartition by range (unique2) subpartition template ( start (0) end (1000) eve
( start (0) end (1000) every (100));
alter table mpp3250 add default partition default_part;
-copy mpp3250 from '@abs_srcdir@/data/onek.data';
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set onek_data :abs_srcdir '/data/onek.data'
+copy mpp3250 from :'onek_data';
CREATE TABLE mpp3375 (
unique1 int4,
@@ -647,7 +649,7 @@ CREATE TABLE mpp3375 (
string4 name
);
-copy mpp3375 from '@abs_srcdir@/data/onek.data';
+copy mpp3375 from :'onek_data';
CREATE TABLE mpp3375a (
unique1 int4,
@@ -736,7 +738,7 @@ CREATE TABLE mpp3261 (
string4 name
);
-copy mpp3261 from '@abs_srcdir@/data/onek.data';
+copy mpp3261 from :'onek_data';
CREATE TABLE mpp3261_part (
diff --git a/src/test/regress/sql/segspace.sql b/src/test/regress/sql/segspace.sql
index 01b00fc0b5a..477d19a0a1c 100644
--- a/src/test/regress/sql/segspace.sql
+++ b/src/test/regress/sql/segspace.sql
@@ -53,7 +53,7 @@ set local enable_parallel = true;
set local optimizer=off;
set local min_parallel_table_scan_size=0;
set local min_parallel_index_scan_size = 0;
-set local force_parallel_mode=1;
+set local debug_parallel_query=regress;
EXPLAIN(COSTS OFF) SELECT t1.* FROM segspace_test_hj_skew AS t1, segspace_test_hj_skew AS t2 WHERE t1.i1=t2.i2;
SELECT count(t1.*) FROM segspace_test_hj_skew AS t1, segspace_test_hj_skew AS t2 WHERE t1.i1=t2.i2;
rollback;
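The segspace change is just the upstream GUC rename (PostgreSQL 16): force_parallel_mode is now debug_parallel_query, with the same accepted values (off, on, regress):

    set local debug_parallel_query = regress;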
diff --git a/src/test/regress/sql/sreh.sql b/src/test/regress/sql/sreh.sql
index f5db85e677d..c722fd2ee11 100755
--- a/src/test/regress/sql/sreh.sql
+++ b/src/test/regress/sql/sreh.sql
@@ -23,31 +23,33 @@ CREATE TABLE sreh_copy(a int, b int, c int) distributed by(a);
--
-- ROW reject limit only
--
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' SEGMENT REJECT LIMIT 1000;
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set bad_data1 :abs_srcdir '/data/bad_data1.data'
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' SEGMENT REJECT LIMIT 1000;
SELECT * FROM sreh_copy ORDER BY a,b,c;
--
-- ROW reject limit only - low value that gets reached
--
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' SEGMENT REJECT LIMIT 2;
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' SEGMENT REJECT LIMIT 2;
SELECT * FROM sreh_copy ORDER BY a,b,c;
--
-- error logs
--
DROP TABLE IF EXISTS sreh_copy; CREATE TABLE sreh_copy(a int, b int, c int) distributed by(a);
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' LOG ERRORS INTO WHATEVER SEGMENT REJECT LIMIT 1000;
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' LOG ERRORS INTO WHATEVER SEGMENT REJECT LIMIT 1000;
SET gp_ignore_error_table=true;
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' LOG ERRORS INTO WHATEVER SEGMENT REJECT LIMIT 1000;
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' LOG ERRORS INTO WHATEVER SEGMENT REJECT LIMIT 1000;
SELECT * FROM sreh_copy ORDER BY a,b,c;
WITH error_log AS (SELECT gp_read_error_log('sreh_copy')) select count(*) from error_log;
--
-- error logs - do the same thing again. this time error logs exist and should get data appended
--
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data1.data' DELIMITER '|' LOG ERRORS SEGMENT REJECT LIMIT 1000;
+COPY sreh_copy FROM :'bad_data1' DELIMITER '|' LOG ERRORS SEGMENT REJECT LIMIT 1000;
SELECT * FROM sreh_copy ORDER BY a,b,c;
SELECT linenum, rawdata FROM gp_read_error_log('sreh_copy') ORDER BY linenum;
@@ -101,16 +103,17 @@ SELECT * FROM sreh_constr; -- should exist and be empty
-- so the percent calculation should always be the same regardless of number of
-- QE's in the system.
--
+\set bad_data3 :abs_srcdir '/data/bad_data3.data'
set gp_reject_percent_threshold = 100;
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data3.data' DELIMITER '|' SEGMENT REJECT LIMIT 10 PERCENT; --pass
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data3.data' DELIMITER '|' SEGMENT REJECT LIMIT 2 PERCENT; --fail
+COPY sreh_copy FROM :'bad_data3' DELIMITER '|' SEGMENT REJECT LIMIT 10 PERCENT; --pass
+COPY sreh_copy FROM :'bad_data3' DELIMITER '|' SEGMENT REJECT LIMIT 2 PERCENT; --fail
--
-- test PERCENT reject limit logic with custom threshold 10 (only practical for test purposes)
--
set gp_reject_percent_threshold = 10;
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data3.data' DELIMITER '|' SEGMENT REJECT LIMIT 10 PERCENT; --fail
-COPY sreh_copy FROM '@abs_srcdir@/data/bad_data3.data' DELIMITER '|' SEGMENT REJECT LIMIT 20 PERCENT; --pass
+COPY sreh_copy FROM :'bad_data3' DELIMITER '|' SEGMENT REJECT LIMIT 10 PERCENT; --fail
+COPY sreh_copy FROM :'bad_data3' DELIMITER '|' SEGMENT REJECT LIMIT 20 PERCENT; --pass
-- MPP-2933 (multiple dist-key attr conversion errors)
create table t2933 (col1 varchar(3) NULL , col2 char(1) NULL, col3 varchar(4) NULL, col4 char(1) NULL, col5 varchar(20) NULL)
@@ -126,9 +129,9 @@ DROP TABLE sreh_constr;
-- ###########################################################
-- External Tables
-- ###########################################################
-
+\getenv binddir PG_BINDDIR
CREATE EXTERNAL WEB TABLE gpfdist_sreh_start (x text)
-execute E'((@bindir@/gpfdist -p 8080 -d @abs_srcdir@/data </dev/null >/dev/null 2>&1 &); for i in `seq 1 30`; do curl 127.0.0.1:8080 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
+execute E'(('||:'binddir'||'/gpfdist -p 8080 -d '||:'abs_srcdir'||'/data </dev/null >/dev/null 2>&1 &); for i in `seq 1 30`; do curl 127.0.0.1:8080 >/dev/null 2>&1 && break; sleep 1; done; echo "starting...") '
on MASTER
FORMAT 'text' (delimiter '|');
@@ -146,8 +149,9 @@ CREATE TABLE sreh_target(a int, b int, c int) distributed by(a);
--
-- reject limit only
--
+\getenv hostname PG_HOSTNAME
CREATE EXTERNAL TABLE sreh_ext(a int, b int, c int)
-LOCATION ('gpfdist://@hostname@:8080/bad_data1.data' )
+LOCATION ('gpfdist://'||'@hostname@'||':8080/bad_data1.data' )
FORMAT 'text' (delimiter '|')
SEGMENT REJECT LIMIT 10000;
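The gpfdist EXECUTE rewrite combines the psql variables with plain SQL string concatenation: :'binddir' and :'abs_srcdir' expand to quoted literals, and || splices them into the command string. The mechanics, under hypothetical values:

    \set binddir '/usr/local/bin'
    SELECT 'start: ' || :'binddir' || '/gpfdist -p 8080' AS cmd;
    --  cmd: start: /usr/local/bin/gpfdist -p 8080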
diff --git a/src/test/regress/sql/transient_types.sql b/src/test/regress/sql/transient_types.sql
index 0599217f20a..9a2f0b27c62 100644
--- a/src/test/regress/sql/transient_types.sql
+++ b/src/test/regress/sql/transient_types.sql
@@ -146,8 +146,10 @@ drop table t;
-- This test case use UDF assign_new_record() to generate a new record type in cache
-- for each tuple, these types should be sync to receiver.
-- test function
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set regress_dll :abs_builddir '/regress.so'
create or replace function assign_new_record()
-returns SETOF record as '@abs_builddir@/regress@DLSUFFIX@',
+returns SETOF record as :'regress_dll',
'assign_new_record' LANGUAGE C VOLATILE;
-- transfer record types via motion incrementally