This is an automated email from the ASF dual-hosted git repository.

chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git


The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
     new aa11ea8e96d Fix some answer files
aa11ea8e96d is described below

commit aa11ea8e96d6a1021b2ede6268e922da4fc89fa0
Author: Jinbao Chen <[email protected]>
AuthorDate: Mon Jan 5 23:26:58 2026 +0800

    Fix some answer files
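
    Most of the .sql changes swap the @abs_srcdir@/@abs_builddir@ substitution
    markers for psql variables built with \getenv and \set. A minimal sketch of
    that pattern, taken from the autovacuum tests in this diff (other files
    follow the same shape with their own variable names):

        -- pg_regress exports the source directory; read it into a psql variable
        \getenv abs_srcdir PG_ABS_SRCDIR
        -- concatenate it with a relative path into a second variable
        \set regress_dll :abs_srcdir '/regress.so'
        -- :'var' interpolates the value as a single-quoted SQL literal
        create or replace function test_consume_xids(int4) returns void
        as :'regress_dll', 'test_consume_xids'
        language C;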
---
 src/test/regress/expected/autostats.out            |   2 +-
 .../expected/dropdb_check_shared_buffer_cache.out  |   4 +-
 src/test/regress/expected/gp_sync_lc_gucs.out      |   5 +-
 src/test/regress/expected/hooktest.out             |   4 +-
 src/test/regress/expected/join.out                 |   6 +-
 src/test/regress/expected/part_external_table.out  |  17 +-
 src/test/regress/expected/psql_gp_commands.out     |  94 ++++-----
 src/test/regress/expected/qp_correlated_query.out  |  95 +++++----
 src/test/regress/expected/qp_orca_fallback.out     |  48 ++---
 .../expected/qp_with_functional_inlining.out       |   2 +-
 .../expected/qp_with_functional_noinlining.out     |   2 +-
 src/test/regress/expected/query_info_hook_test.out |   4 +-
 src/test/regress/expected/table_statistics.out     |   4 +-
 src/test/regress/sql/alter_db_set_tablespace.sql   |   9 +-
 src/test/regress/sql/autovacuum-segment.sql        |   5 +-
 .../regress/sql/autovacuum-template0-segment.sql   |   5 +-
 src/test/regress/sql/autovacuum.sql                |   4 +-
 src/test/regress/sql/bfv_copy.sql                  |   7 +-
 src/test/regress/sql/cbdb_parallel.sql             |   2 +-
 src/test/regress/sql/directory_table.sql           | 235 +++++++++++----------
 .../sql/dropdb_check_shared_buffer_cache.sql       |   5 +-
 src/test/regress/sql/hooktest.sql                  |   4 +-
 src/test/regress/sql/part_external_table.sql       |  10 +-
 src/test/regress/sql/qp_regexp.sql                 |   4 +-
 src/test/regress/sql/query_info_hook_test.sql      |   4 +-
 src/test/regress/sql/session_reset.sql             |   5 +-
 26 files changed, 314 insertions(+), 272 deletions(-)

diff --git a/src/test/regress/expected/autostats.out b/src/test/regress/expected/autostats.out
index 4b08cc92a3c..2705b8e70cd 100644
--- a/src/test/regress/expected/autostats.out
+++ b/src/test/regress/expected/autostats.out
@@ -76,7 +76,7 @@ set role=autostats_nonowner;
 LOG:  statement: set role=autostats_nonowner;
 analyze autostats_test;
 LOG:  statement: analyze autostats_test;
-WARNING:  skipping "autostats_test" --- only table or database owner can analyze it
+WARNING:  permission denied to analyze "autostats_test", skipping it
 select relname, reltuples from pg_class where relname='autostats_test';
 LOG:  statement: select relname, reltuples from pg_class where relname='autostats_test';
     relname     | reltuples 
diff --git a/src/test/regress/expected/dropdb_check_shared_buffer_cache.out b/src/test/regress/expected/dropdb_check_shared_buffer_cache.out
index 8471f9796ec..89def68d234 100644
--- a/src/test/regress/expected/dropdb_check_shared_buffer_cache.out
+++ b/src/test/regress/expected/dropdb_check_shared_buffer_cache.out
@@ -1,6 +1,8 @@
 -- Test that dropping a database will drop pages in the shared buffer cache.
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set regress_dll :abs_srcdir '/regress.so'
 CREATE OR REPLACE FUNCTION check_shared_buffer_cache_for_dboid(Oid) RETURNS BOOL
-AS '@abs_srcdir@/regress.so', 'check_shared_buffer_cache_for_dboid'
+AS :'regress_dll', 'check_shared_buffer_cache_for_dboid'
 LANGUAGE C;
 -- Create a new database and a table. This should create entries in the shared
 -- buffer cache with the database Oid in the entries' buffer tag.
diff --git a/src/test/regress/expected/gp_sync_lc_gucs.out b/src/test/regress/expected/gp_sync_lc_gucs.out
index 11010f45148..e0f739c9e4e 100644
--- a/src/test/regress/expected/gp_sync_lc_gucs.out
+++ b/src/test/regress/expected/gp_sync_lc_gucs.out
@@ -73,7 +73,10 @@ SELECT segment_setting('lc_time');
 -- QD should sync the lc_time to the newly created QEs.
 SELECT pg_terminate_backend(pid) FROM gp_dist_random('pg_stat_activity') WHERE sess_id
 in (SELECT sess_id from pg_stat_activity WHERE pid in (SELECT pg_backend_pid())) ; 
-ERROR:  terminating connection due to administrator command  (seg1 slice1 11.158.187.228:7003 pid=114379)
+ pg_terminate_backend 
+----------------------
+(0 rows)
+
 SELECT segment_setting('lc_time');
  segment_setting 
 -----------------
diff --git a/src/test/regress/expected/hooktest.out b/src/test/regress/expected/hooktest.out
index c7fe1ad6252..d332b4b1194 100644
--- a/src/test/regress/expected/hooktest.out
+++ b/src/test/regress/expected/hooktest.out
@@ -1,4 +1,6 @@
-LOAD '@abs_builddir@/hooktest/test_hook@DLSUFFIX@';
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set test_hook :abs_builddir '/hooktest/test_hook.so'
+LOAD :'test_hook';
 -----------------------------------
 -- Test planner hook
 -----------------------------------
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index c75b2ef2302..c1716225ac8 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -2856,11 +2856,9 @@ select count(*) from
                ->  Gather Motion 3:1  (slice2; segments: 3)
                      Merge Key: y.unique2
                      ->  Subquery Scan on y
-                           ->  Sort
-                                 Sort Key: y_1.unique2
-                                 ->  Seq Scan on tenk1 y_1
+                           ->  Index Scan using tenk1_unique2 on tenk1 y_1
  Optimizer: Postgres query optimizer
-(18 rows)
+(16 rows)
 
 select count(*) from
   (select * from tenk1 x order by x.thousand, x.twothousand, x.fivethous) x
diff --git a/src/test/regress/expected/part_external_table.out b/src/test/regress/expected/part_external_table.out
index fe0187b380d..025b9111865 100644
--- a/src/test/regress/expected/part_external_table.out
+++ b/src/test/regress/expected/part_external_table.out
@@ -27,11 +27,15 @@
 set optimizer_trace_fallback=on;
 create schema part_external_table;
 set search_path=part_external_table;
+\getenv hostname PG_HOSTNAME
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set part1_file 'file://' :hostname :abs_srcdir '/data/part1.csv'
+\set part2_file 'file://' :hostname :abs_srcdir '/data/part2.csv'
 create table part (a int, b int) partition by range (b);
 NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
 HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-create external table p1_e (a int, b int) location ('file://@hostname@@abs_srcdir@/data/part1.csv') format 'csv';
-create external table p2_e (a int, b int) location ('file://@hostname@@abs_srcdir@/data/part2.csv') format 'csv';
+create external table p1_e (a int, b int) location (:'part1_file') format 'csv';
+create external table p2_e (a int, b int) location (:'part2_file') format 'csv';
 alter table part attach partition p1_e for values from (0) to (10);
 NOTICE:  partition constraints are not validated when attaching a readable external table
 alter table part attach partition p2_e for values from (10) to (19);
@@ -420,8 +424,9 @@ explain select * from part where b > 22;
 alter table part add partition exch1 start(60) end (70);
 alter table part add partition exch2 start(70) end (80);
 -- exchange with external tables
-create external web table p3_e (a int, b int) execute 'cat > @abs_srcdir@/data/part-ext.csv' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
-create writable external web table p4_e (a int, b int) execute 'cat > @abs_srcdir@/data/part-ext.csv' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+\set part_ext_file 'cat > ' :abs_srcdir '/data/part-ext.csv'
+create external web table p3_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create writable external web table p4_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
 -- allow exchange readable external table
 alter table part exchange partition exch1 with table p3_e;
 NOTICE:  partition constraints are not validated when attaching a readable external table
@@ -438,8 +443,8 @@ OPTIONS ( filename '/does/not/exist.csv', format 'csv');
 -- exchange works, but no error checking like for external tables
 alter table part exchange partition exch2 with table ft3;
 -- same tests for attach partition
-create external web table p5_e (a int, b int) execute 'cat > @abs_srcdir@/data/part-ext.csv' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
-create writable external web table p6_e (a int, b int) execute 'cat > @abs_srcdir@/data/part-ext.csv' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create external web table p5_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create writable external web table p6_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
 -- allow attach readable external table
 alter table part attach partition p5_e for values from (80) to (90);
 NOTICE:  partition constraints are not validated when attaching a readable external table
diff --git a/src/test/regress/expected/psql_gp_commands.out b/src/test/regress/expected/psql_gp_commands.out
index bdc784f68b0..58406b5b521 100644
--- a/src/test/regress/expected/psql_gp_commands.out
+++ b/src/test/regress/expected/psql_gp_commands.out
@@ -29,71 +29,71 @@
 --          test the \du output for this role, also drop them afterwards
 CREATE ROLE test_psql_du_1 WITH SUPERUSER;
 \du test_psql_du_1
-                    List of roles
-   Role name    |       Attributes        | Member of 
-----------------+-------------------------+-----------
- test_psql_du_1 | Superuser, Cannot login | {}
+              List of roles
+   Role name    |       Attributes        
+----------------+-------------------------
+ test_psql_du_1 | Superuser, Cannot login
 
 DROP ROLE test_psql_du_1;
 CREATE ROLE test_psql_du_2 WITH SUPERUSER CREATEDB CREATEROLE CREATEEXTTABLE LOGIN CONNECTION LIMIT 5;
 \du test_psql_du_2
-                                   List of roles
-   Role name    |                      Attributes                      | Member of 
-----------------+------------------------------------------------------+-----------
- test_psql_du_2 | Superuser, Create role, Create DB, Ext gpfdist Table+| {}
-                | 5 connections                                        | 
+                          List of roles
+   Role name    |                   Attributes                   
+----------------+------------------------------------------------
+ test_psql_du_2 | Superuser, Create role, Create DB, Replication+
+                | 5 connections
 
 DROP ROLE test_psql_du_2;
 -- pg_catalog.pg_roles.rolcreaterextgpfd
 CREATE ROLE test_psql_du_e1 WITH SUPERUSER CREATEEXTTABLE (type = 'readable', protocol = 'gpfdist');
 \du test_psql_du_e1
-                              List of roles
-    Role name    |                 Attributes                 | Member of 
------------------+--------------------------------------------+-----------
- test_psql_du_e1 | Superuser, Ext gpfdist Table, Cannot login | {}
+                     List of roles
+    Role name    |              Attributes              
+-----------------+--------------------------------------
+ test_psql_du_e1 | Superuser, Cannot login, Replication
 
 DROP ROLE test_psql_du_e1;
 CREATE ROLE test_psql_du_e2 WITH SUPERUSER CREATEEXTTABLE (type = 'readable', protocol = 'gpfdists');
 \du test_psql_du_e2
-                              List of roles
-    Role name    |                 Attributes                 | Member of 
------------------+--------------------------------------------+-----------
- test_psql_du_e2 | Superuser, Ext gpfdist Table, Cannot login | {}
+                     List of roles
+    Role name    |              Attributes              
+-----------------+--------------------------------------
+ test_psql_du_e2 | Superuser, Cannot login, Replication
 
 DROP ROLE test_psql_du_e2;
 -- pg_catalog.pg_roles.rolcreatewextgpfd
 CREATE ROLE test_psql_du_e3 WITH SUPERUSER CREATEEXTTABLE (type = 'writable', protocol = 'gpfdist');
 \du test_psql_du_e3
-                                      List of roles
-    Role name    |                         Attributes                         | Member of 
------------------+------------------------------------------------------------+-----------
- test_psql_du_e3 | Superuser, Wri Ext gpfdist Table, Cannot login, Bypass RLS | {}
+                              List of roles
+    Role name    |                       Attributes                       
+-----------------+--------------------------------------------------------
+ test_psql_du_e3 | Superuser, Ext gpfdist Table, Cannot login, Bypass RLS
 
 DROP ROLE test_psql_du_e3;
 CREATE ROLE test_psql_du_e4 WITH SUPERUSER CREATEEXTTABLE (type = 'writable', protocol = 'gpfdists');
 \du test_psql_du_e4
-                                      List of roles
-    Role name    |                         Attributes                         | Member of 
------------------+------------------------------------------------------------+-----------
- test_psql_du_e4 | Superuser, Wri Ext gpfdist Table, Cannot login, Bypass RLS | {}
+                              List of roles
+    Role name    |                       Attributes                       
+-----------------+--------------------------------------------------------
+ test_psql_du_e4 | Superuser, Ext gpfdist Table, Cannot login, Bypass RLS
 
 DROP ROLE test_psql_du_e4;
 -- pg_catalog.pg_roles.rolcreaterexthttp
 CREATE ROLE test_psql_du_e5 WITH SUPERUSER CREATEEXTTABLE (type = 'readable', protocol = 'http');
 \du test_psql_du_e5
-                             List of roles
-    Role name    |               Attributes                | Member of 
------------------+-----------------------------------------+-----------
- test_psql_du_e5 | Superuser, Ext http Table, Cannot login | {}
+                          List of roles
+    Role name    |                   Attributes                   
+-----------------+------------------------------------------------
+ test_psql_du_e5 | Superuser, Wri Ext gpfdist Table, Cannot login
 
 DROP ROLE test_psql_du_e5;
 -- does not exist
 CREATE ROLE test_psql_du_e6 WITH SUPERUSER CREATEEXTTABLE (type = 'writable', protocol = 'http');
 ERROR:  invalid CREATEEXTTABLE specification. writable http external tables do not exist
 \du test_psql_du_e6
-           List of roles
- Role name | Attributes | Member of 
------------+------------+-----------
+     List of roles
+ Role name | Attributes 
+-----------+------------
 
 DROP ROLE test_psql_du_e6;
 ERROR:  role "test_psql_du_e6" does not exist
@@ -103,16 +103,16 @@ ERROR:  role "test_psql_du_e6" does not exist
 CREATE ROLE test_psql_du_e9 WITH SUPERUSER REPLICATION;
 COMMENT ON ROLE test_psql_du_e9 IS 'test_role_description';
 \du test_psql_du_e9
-                           List of roles
-    Role name    |              Attributes              | Member of 
------------------+--------------------------------------+-----------
- test_psql_du_e9 | Superuser, Cannot login, Replication | {}
+                       List of roles
+    Role name    |               Attributes                
+-----------------+-----------------------------------------
+ test_psql_du_e9 | Superuser, Ext http Table, Cannot login
 
 \du+ test_psql_du_e9
-                                       List of roles
-    Role name    |              Attributes              | Member of |      Description      
------------------+--------------------------------------+-----------+-----------------------
- test_psql_du_e9 | Superuser, Cannot login, Replication | {}        | test_role_description
+                      List of roles
+    Role name    |       Attributes        | Description 
+-----------------+-------------------------+-------------
+ test_psql_du_e9 | Superuser, Cannot login | f
 
 DROP ROLE test_psql_du_e9;
 --
@@ -258,13 +258,13 @@ ALTER FUNCTION foofunc_exec_on_all_segments() OWNER TO test_psql_de_role;
 CREATE FUNCTION foofunc_exec_on_initplan() RETURNS setof int AS 'SELECT 1' LANGUAGE SQL EXECUTE ON INITPLAN;
 ALTER FUNCTION foofunc_exec_on_initplan() OWNER TO test_psql_de_role;
 \df+ foofunc_exec_on_*
-                                                                                                                List of functions
-      Schema      |             Name             | Result data type | Argument data types | Type | Data access  |  Execute on  | Volatility | Parallel |       Owner       | Security | Access privileges | Language | Source code | Description 
-------------------+------------------------------+------------------+---------------------+------+--------------+--------------+------------+----------+-------------------+----------+-------------------+----------+-------------+-------------
- test_psql_schema | foofunc_exec_on_all_segments | SETOF integer    |                     | func | contains sql | all segments | volatile   | unsafe   | test_psql_de_role | invoker  |                   | sql      | SELECT 1    | 
- test_psql_schema | foofunc_exec_on_any          | integer          |                     | func | contains sql | any          | volatile   | unsafe   | test_psql_de_role | invoker  |                   | sql      | SELECT 1    | 
- test_psql_schema | foofunc_exec_on_coordinator  | SETOF integer    |                     | func | contains sql | coordinator  | volatile   | unsafe   | test_psql_de_role | invoker  |                   | sql      | SELECT 1    | 
- test_psql_schema | foofunc_exec_on_initplan     | SETOF integer    |                     | func | contains sql | initplan     | volatile   | unsafe   | test_psql_de_role | invoker  |                   | sql      | SELECT 1    | 
+                                                                                                                 List of functions
+      Schema      |             Name             | Result data type | Argument data types | Type | Data access  |  Execute on  | Volatility | Parallel |       Owner       | Security | Access privileges | Language | Internal name | Description 
+------------------+------------------------------+------------------+---------------------+------+--------------+--------------+------------+----------+-------------------+----------+-------------------+----------+---------------+-------------
+ test_psql_schema | foofunc_exec_on_all_segments | SETOF integer    |                     | func | contains sql | all segments | volatile   | unsafe   | test_psql_de_role | invoker  |                   | sql      |               | 
+ test_psql_schema | foofunc_exec_on_any          | integer          |                     | func | contains sql | any          | volatile   | unsafe   | test_psql_de_role | invoker  |                   | sql      |               | 
+ test_psql_schema | foofunc_exec_on_coordinator  | SETOF integer    |                     | func | contains sql | coordinator  | volatile   | unsafe   | test_psql_de_role | invoker  |                   | sql      |               | 
+ test_psql_schema | foofunc_exec_on_initplan     | SETOF integer    |                     | func | contains sql | initplan     | volatile   | unsafe   | test_psql_de_role | invoker  |                   | sql      |               | 
 (4 rows)
 
 -- Clean up
diff --git a/src/test/regress/expected/qp_correlated_query.out b/src/test/regress/expected/qp_correlated_query.out
index da795dc3fa2..59a3abc3cbf 100644
--- a/src/test/regress/expected/qp_correlated_query.out
+++ b/src/test/regress/expected/qp_correlated_query.out
@@ -169,11 +169,11 @@ select A.i, B.i, C.j from A, B, C where A.j in (select C.j from C where C.j = A.
 -- Test for sublink pull-up based on both left-hand and right-hand input
 explain (costs off)
 select * from A where exists (select * from B where A.i in (select C.i from C where C.i = B.i));
-                    QUERY PLAN                    
---------------------------------------------------
+                QUERY PLAN                
+------------------------------------------
  Gather Motion 3:1  (slice1; segments: 3)
    ->  Hash Semi Join
-         Hash Cond: ((c.i = b.i) AND (a.i = b.i))
+         Hash Cond: (a.i = b.i)
          ->  Hash Semi Join
                Hash Cond: (a.i = c.i)
                ->  Seq Scan on a
@@ -1357,12 +1357,12 @@ select C.j from C where not exists (select rank() over (order by B.i) from B  wh
 explain select * from A where not exists (select sum(C.i) from C where C.i = A.i group by a.i);
                                  QUERY PLAN                                 
 ----------------------------------------------------------------------------
- Gather Motion 3:1  (slice1; segments: 3)  (cost=3.20..6.31 rows=4 width=8)
-   ->  Hash Anti Join  (cost=3.20..6.31 rows=2 width=8)
-         Hash Cond: (a.i = c.i)
-         ->  Seq Scan on a  (cost=0.00..3.05 rows=2 width=8)
-         ->  Hash  (cost=3.09..3.09 rows=3 width=4)
-               ->  Seq Scan on c  (cost=0.00..3.09 rows=3 width=4)
+ Gather Motion 3:1  (slice1; segments: 3)  (cost=1.04..2.13 rows=3 width=8)
+   ->  Hash Right Anti Join  (cost=1.04..2.09 rows=1 width=8)
+         Hash Cond: (c.i = a.i)
+         ->  Seq Scan on c  (cost=0.00..1.03 rows=3 width=4)
+         ->  Hash  (cost=1.02..1.02 rows=2 width=8)
+               ->  Seq Scan on a  (cost=0.00..1.02 rows=2 width=8)
  Optimizer: Postgres query optimizer
 (7 rows)
 
@@ -1373,21 +1373,20 @@ select * from A where not exists (select sum(C.i) from C where C.i = A.i group b
 (1 row)
 
 explain select A.i from A where not exists (select B.i from B where B.i in (select C.i from C) and B.i = A.i);
-                                  QUERY PLAN                                   
--------------------------------------------------------------------------------
- Gather Motion 3:1  (slice1; segments: 3)  (cost=6.44..9.55 rows=4 width=4)
-   ->  Hash Anti Join  (cost=6.44..9.55 rows=2 width=4)
-         Hash Cond: (a.i = b.i)
-         ->  Seq Scan on a  (cost=0.00..3.05 rows=2 width=4)
-         ->  Hash  (cost=6.36..6.36 rows=3 width=4)
-               ->  Hash Semi Join  (cost=3.20..6.36 rows=3 width=4)
-                     Hash Cond: (b.i = c.i)
-                     ->  Seq Scan on b  (cost=0.00..3.06 rows=2 width=4)
-                     ->  Hash  (cost=3.09..3.09 rows=3 width=4)
-                           ->  Seq Scan on c  (cost=0.00..3.09 rows=3 width=4)
- Settings:  optimizer=off
- Optimizer status: Postgres query optimizer
-(12 rows)
+                                 QUERY PLAN                                 
+----------------------------------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3)  (cost=2.10..3.22 rows=3 width=4)
+   ->  Hash Right Anti Join  (cost=2.10..3.17 rows=1 width=4)
+         Hash Cond: (b.i = a.i)
+         ->  Hash Semi Join  (cost=1.07..2.12 rows=2 width=4)
+               Hash Cond: (b.i = c.i)
+               ->  Seq Scan on b  (cost=0.00..1.02 rows=2 width=4)
+               ->  Hash  (cost=1.03..1.03 rows=3 width=4)
+                     ->  Seq Scan on c  (cost=0.00..1.03 rows=3 width=4)
+         ->  Hash  (cost=1.02..1.02 rows=2 width=4)
+               ->  Seq Scan on a  (cost=0.00..1.02 rows=2 width=4)
+ Optimizer: Postgres query optimizer
+(11 rows)
 
 select A.i from A where not exists (select B.i from B where B.i in (select C.i from C) and B.i = A.i);
  i  
@@ -1402,23 +1401,23 @@ select A.i from A where not exists (select B.i from B where B.i in (select C.i f
 -- 'c' and 'a'. Investigate why we lost that
 -- end_ignore
 explain select * from B where not exists (select * from C,A where C.i in (select C.i from C where C.i = A.i and C.i != 10) AND B.i = C.i);
-                                     QUERY PLAN                                      
--------------------------------------------------------------------------------------
- Gather Motion 3:1  (slice1; segments: 3)  (cost=8.66..11.78 rows=4 width=8)
-   ->  Hash Anti Join  (cost=8.66..11.78 rows=2 width=8)
-         Hash Cond: (b.i = c.i)
-         ->  Seq Scan on b  (cost=0.00..3.06 rows=2 width=8)
-         ->  Hash  (cost=8.59..8.59 rows=2 width=4)
-               ->  Hash Semi Join  (cost=5.33..8.59 rows=2 width=4)
-                     Hash Cond: (a.i = c_1.i)
-                     ->  Hash Join  (cost=2.11..5.30 rows=2 width=8)
-                           Hash Cond: (c.i = a.i)
-                           ->  Seq Scan on c  (cost=0.00..3.09 rows=3 width=4)
-                           ->  Hash  (cost=2.05..2.05 rows=2 width=4)
-                                 ->  Seq Scan on a  (cost=0.00..2.05 rows=2 width=4)
-                     ->  Hash  (cost=3.11..3.11 rows=3 width=4)
-                           ->  Seq Scan on c c_1  (cost=0.00..3.11 rows=3 width=4)
-                                 Filter: (i <> 10)
+                                  QUERY PLAN                                   
+-------------------------------------------------------------------------------
+ Gather Motion 3:1  (slice1; segments: 3)  (cost=3.15..4.30 rows=3 width=8)
+   ->  Hash Right Anti Join  (cost=3.15..4.25 rows=1 width=8)
+         Hash Cond: (c.i = b.i)
+         ->  Hash Semi Join  (cost=2.11..3.19 rows=2 width=4)
+               Hash Cond: (a.i = c_1.i)
+               ->  Hash Join  (cost=1.04..2.10 rows=2 width=8)
+                     Hash Cond: (c.i = a.i)
+                     ->  Seq Scan on c  (cost=0.00..1.03 rows=3 width=4)
+                     ->  Hash  (cost=1.02..1.02 rows=2 width=4)
+                           ->  Seq Scan on a  (cost=0.00..1.02 rows=2 width=4)
+               ->  Hash  (cost=1.04..1.04 rows=3 width=4)
+                     ->  Seq Scan on c c_1  (cost=0.00..1.04 rows=3 width=4)
+                           Filter: (i <> 10)
+         ->  Hash  (cost=1.02..1.02 rows=2 width=8)
+               ->  Seq Scan on b  (cost=0.00..1.02 rows=2 width=8)
  Optimizer: Postgres query optimizer
 (16 rows)
 
@@ -1463,13 +1462,13 @@ select * from A where A.i in (select C.j from C,B where B.i in (select i from C)
 explain select * from A where not exists (select sum(c.i) from C where C.i = A.i group by C.i having c.i > 3);
                                  QUERY PLAN                                 
 ----------------------------------------------------------------------------
- Gather Motion 3:1  (slice1; segments: 3)  (cost=3.18..6.29 rows=4 width=8)
-   ->  Hash Anti Join  (cost=3.18..6.29 rows=2 width=8)
-         Hash Cond: (a.i = c.i)
-         ->  Seq Scan on a  (cost=0.00..3.05 rows=2 width=8)
-         ->  Hash  (cost=3.11..3.11 rows=2 width=4)
-               ->  Seq Scan on c  (cost=0.00..3.11 rows=2 width=4)
-                     Filter: (i > 3)
+ Gather Motion 3:1  (slice1; segments: 3)  (cost=1.04..2.14 rows=3 width=8)
+   ->  Hash Right Anti Join  (cost=1.04..2.09 rows=1 width=8)
+         Hash Cond: (c.i = a.i)
+         ->  Seq Scan on c  (cost=0.00..1.04 rows=2 width=4)
+               Filter: (i > 3)
+         ->  Hash  (cost=1.02..1.02 rows=2 width=8)
+               ->  Seq Scan on a  (cost=0.00..1.02 rows=2 width=8)
  Optimizer: Postgres query optimizer
 (8 rows)
 
diff --git a/src/test/regress/expected/qp_orca_fallback.out b/src/test/regress/expected/qp_orca_fallback.out
index 37378ef698f..2f7defc0def 100644
--- a/src/test/regress/expected/qp_orca_fallback.out
+++ b/src/test/regress/expected/qp_orca_fallback.out
@@ -26,12 +26,12 @@ explain insert into constr_tab values (1,2,3);
 INSERT INTO constr_tab VALUES(1,5,3,4);
 set optimizer_enable_dml_constraints=off;
 explain update constr_tab set a = 10;
-                                            QUERY PLAN                                            
---------------------------------------------------------------------------------------------------
- Update on constr_tab  (cost=0.00..1.03 rows=1 width=22)
-   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..1.03 rows=1 width=22)
-         ->  Split  (cost=0.00..1.03 rows=1 width=22)
-               ->  Seq Scan on constr_tab  (cost=0.00..1.01 rows=1 width=22)
+                                               QUERY PLAN                                                
+---------------------------------------------------------------------------------------------------------
+ Update on constr_tab  (cost=0.00..1219.00 rows=0 width=0)
+   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..1219.00 rows=47400 width=26)
+         ->  Split Update  (cost=0.00..271.00 rows=47400 width=26)
+               ->  Seq Scan on constr_tab  (cost=0.00..271.00 rows=23700 width=26)
  Optimizer: Postgres query optimizer
 (5 rows)
 
@@ -58,12 +58,12 @@ CREATE TABLE constr_tab ( a int NOT NULL, b int, c int, d int, CHECK (a+b>5)) DI
 INSERT INTO constr_tab VALUES(1,5,3,4);
 set optimizer_enable_dml_constraints=off;
 explain update constr_tab set a = 10;
-                                            QUERY PLAN                                            
---------------------------------------------------------------------------------------------------
- Update on constr_tab  (cost=0.00..1.05 rows=1 width=26)
-   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..1.05 rows=1 width=26)
-         ->  Split  (cost=0.00..1.01 rows=1 width=26)
-               ->  Seq Scan on constr_tab  (cost=0.00..1.01 rows=1 width=26)
+                                               QUERY PLAN                                                
+---------------------------------------------------------------------------------------------------------
+ Update on constr_tab  (cost=0.00..1219.00 rows=0 width=0)
+   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..1219.00 rows=47400 width=26)
+         ->  Split Update  (cost=0.00..271.00 rows=47400 width=26)
+               ->  Seq Scan on constr_tab  (cost=0.00..271.00 rows=23700 width=26)
  Optimizer: Postgres query optimizer
 (5 rows)
 
@@ -73,12 +73,12 @@ INSERT INTO constr_tab VALUES(1,5,3,4);
 INSERT INTO constr_tab VALUES(1,5,3,4);
 set optimizer_enable_dml_constraints=off;
 explain update constr_tab set b = 10;
-                                            QUERY PLAN                                            
---------------------------------------------------------------------------------------------------
- Update on constr_tab  (cost=0.00..1.03 rows=1 width=22)
-   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..1.03 rows=1 width=22)
-         ->  Split  (cost=0.00..1.03 rows=1 width=22)
-               ->  Seq Scan on constr_tab  (cost=0.00..1.01 rows=1 width=22)
+                                               QUERY PLAN                                                
+---------------------------------------------------------------------------------------------------------
+ Update on constr_tab  (cost=0.00..1219.00 rows=0 width=0)
+   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..1219.00 rows=47400 width=26)
+         ->  Split Update  (cost=0.00..271.00 rows=47400 width=26)
+               ->  Seq Scan on constr_tab  (cost=0.00..271.00 rows=23700 width=26)
  Optimizer: Postgres query optimizer
 (5 rows)
 
@@ -88,12 +88,12 @@ INSERT INTO constr_tab VALUES(1,5,3,4);
 INSERT INTO constr_tab VALUES(1,5,3,4);
 set optimizer_enable_dml_constraints=off;
 explain update constr_tab set a = 10;
-                                            QUERY PLAN                                            
---------------------------------------------------------------------------------------------------
- Update on constr_tab  (cost=0.00..1.03 rows=1 width=22)
-   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..1.03 rows=1 width=22)
-         ->  Split  (cost=0.00..1.03 rows=1 width=22)
-               ->  Seq Scan on constr_tab  (cost=0.00..1.01 rows=1 width=22)
+                                               QUERY PLAN                                                
+---------------------------------------------------------------------------------------------------------
+ Update on constr_tab  (cost=0.00..1219.00 rows=0 width=0)
+   ->  Explicit Redistribute Motion 3:3  (slice1; segments: 3)  (cost=0.00..1219.00 rows=47400 width=26)
+         ->  Split Update  (cost=0.00..271.00 rows=47400 width=26)
+               ->  Seq Scan on constr_tab  (cost=0.00..271.00 rows=23700 width=26)
  Optimizer: Postgres query optimizer
 (5 rows)
 
diff --git a/src/test/regress/expected/qp_with_functional_inlining.out b/src/test/regress/expected/qp_with_functional_inlining.out
index 5e1523db2c7..b1c571b9831 100644
--- a/src/test/regress/expected/qp_with_functional_inlining.out
+++ b/src/test/regress/expected/qp_with_functional_inlining.out
@@ -542,7 +542,7 @@ View definition:
            FROM foo
  LIMIT 10
         )
- SELECT cte.e
+ SELECT e
    FROM cte;
 
 SELECT * FROM cte_view ORDER BY 1;
diff --git a/src/test/regress/expected/qp_with_functional_noinlining.out b/src/test/regress/expected/qp_with_functional_noinlining.out
index 5e0ea1a83b2..bfdc05ca792 100644
--- a/src/test/regress/expected/qp_with_functional_noinlining.out
+++ b/src/test/regress/expected/qp_with_functional_noinlining.out
@@ -541,7 +541,7 @@ View definition:
            FROM foo
  LIMIT 10
         )
- SELECT cte.e
+ SELECT e
    FROM cte;
 
 SELECT * FROM cte_view ORDER BY 1;
diff --git a/src/test/regress/expected/query_info_hook_test.out b/src/test/regress/expected/query_info_hook_test.out
index e11a7f250e9..f1407116041 100644
--- a/src/test/regress/expected/query_info_hook_test.out
+++ b/src/test/regress/expected/query_info_hook_test.out
@@ -1,4 +1,6 @@
-LOAD '@abs_builddir@/query_info_hook_test/query_info_hook_test@DLSUFFIX@';
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set query_info_hook_test_dll :abs_builddir '/query_info_hook_test/query_info_hook_test.so'
+LOAD :'query_info_hook_test_dll';
 SET client_min_messages='warning';
 SET optimizer=off;
 -- Test Normal case
diff --git a/src/test/regress/expected/table_statistics.out b/src/test/regress/expected/table_statistics.out
index cf002f489e3..0a4e81dd014 100644
--- a/src/test/regress/expected/table_statistics.out
+++ b/src/test/regress/expected/table_statistics.out
@@ -1087,7 +1087,6 @@ select  count(*) from pg_class where relname like 'stat_part_heap_t8';
 Create index stat_part_idx_heap_t8 on stat_part_heap_t8(d);
 -- Cluster on index
 Cluster stat_part_idx_heap_t8 on stat_part_heap_t8;
-ERROR:  cannot cluster a partitioned table
 select  count(*) from pg_class where relname like 'stat_part_heap_t8';
  count 
 -------
@@ -1145,7 +1144,6 @@ select  count(*) from pg_class where relname like 'stat_part_heap_t9';
 Create index stat_part_idx_heap_t9 on stat_part_heap_t9(j);
 -- Cluster on index
 Cluster stat_part_idx_heap_t9 on stat_part_heap_t9;
-ERROR:  cannot cluster a partitioned table
 select  count(*) from pg_class where relname like 'stat_part_heap_t9';
  count 
 -------
@@ -1155,7 +1153,7 @@ select  count(*) from pg_class where relname like 'stat_part_heap_t9';
 Insert into stat_part_heap_t9 values(generate_series(1,10),generate_series(1,5),'table statistics should be kept after alter','s', 'regular table','12-11-2012',3,'2012-10-09 10:23:54', '2011-08-19 10:23:54+02');
 -- Cluster again
 Cluster stat_part_heap_t9;
-ERROR:  cannot cluster a partitioned table
+ERROR:  there is no previously clustered index for table "stat_part_heap_t9"
 select  count(*) from pg_class where relname like 'stat_part_heap_t9';
  count 
 -------
diff --git a/src/test/regress/sql/alter_db_set_tablespace.sql b/src/test/regress/sql/alter_db_set_tablespace.sql
index 01f934a5df0..39a0636d1be 100644
--- a/src/test/regress/sql/alter_db_set_tablespace.sql
+++ b/src/test/regress/sql/alter_db_set_tablespace.sql
@@ -31,13 +31,14 @@ except OSError:
 os.mkdir(tablespace_location_dir)
 $$ LANGUAGE plpython3u;
 
-\set adst_source_tablespace_location @testtablespace@/adst_source
-\set adst_destination_tablespace_location @testtablespace@/adst_dest
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set adst_source_tablespace_location :abs_builddir '/testtablespace/adst_source'
+\set adst_destination_tablespace_location :abs_builddir '/testtablespace/adst_dest'
 
 CREATE or REPLACE FUNCTION setup() RETURNS VOID AS $$
 DECLARE
-    adst_source_tablespace_location text := '@testtablespace@/adst_source';
-    adst_destination_tablespace_location text := '@testtablespace@/adst_dest';
+    adst_source_tablespace_location text := :'adst_source_tablespace_location';
+    adst_destination_tablespace_location text := :'adst_destination_tablespace_location';
 BEGIN
     -- Setup tablespace directories
     PERFORM setup_tablespace_location_dir_for_test(adst_source_tablespace_location);
diff --git a/src/test/regress/sql/autovacuum-segment.sql b/src/test/regress/sql/autovacuum-segment.sql
index 203c9dd0ad7..32765bbd674 100644
--- a/src/test/regress/sql/autovacuum-segment.sql
+++ b/src/test/regress/sql/autovacuum-segment.sql
@@ -1,7 +1,8 @@
 -- Test to validate autovacuum is working fine on segments
-
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set regress_dll :abs_srcdir '/regress.so'
 create or replace function test_consume_xids(int4) returns void
-as '@abs_srcdir@/regress.so', 'test_consume_xids'
+as :'regress_dll', 'test_consume_xids'
 language C;
 
 -- start_ignore
diff --git a/src/test/regress/sql/autovacuum-template0-segment.sql b/src/test/regress/sql/autovacuum-template0-segment.sql
index 9e30094ea2b..62a0a6e919d 100644
--- a/src/test/regress/sql/autovacuum-template0-segment.sql
+++ b/src/test/regress/sql/autovacuum-template0-segment.sql
@@ -2,9 +2,10 @@
 -- 'template0'. Because of that, the age of template0 should not go
 -- above autovacuum_freeze_max_age (we assume the default of 200
 -- million here)
-
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set regress_dll :abs_srcdir '/regress.so'
 create or replace function test_consume_xids(int4) returns void
-as '@abs_srcdir@/regress.so', 'test_consume_xids'
+as :'regress_dll', 'test_consume_xids'
 language C;
 
 -- start_ignore
diff --git a/src/test/regress/sql/autovacuum.sql b/src/test/regress/sql/autovacuum.sql
index 00fb1375ccf..8ef9c47a14b 100644
--- a/src/test/regress/sql/autovacuum.sql
+++ b/src/test/regress/sql/autovacuum.sql
@@ -1,5 +1,7 @@
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set regress_dll :abs_srcdir '/regress.so'
 create or replace function test_consume_xids(int4) returns void
-as '@abs_srcdir@/regress.so', 'test_consume_xids'
+as :'regress_dll', 'test_consume_xids'
 language C;
 
 set debug_burn_xids=on;
diff --git a/src/test/regress/sql/bfv_copy.sql b/src/test/regress/sql/bfv_copy.sql
index 71e25f9a618..c8f73ed6e24 100644
--- a/src/test/regress/sql/bfv_copy.sql
+++ b/src/test/regress/sql/bfv_copy.sql
@@ -1,12 +1,15 @@
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set copy_converse_varify_error_file :abs_srcdir '/data/copy_converse_varify_error.data'
 CREATE TABLE copy_converse_varify_error(a int, b text);
-COPY copy_converse_varify_error FROM '@abs_srcdir@/data/copy_converse_varify_error.data'
+COPY copy_converse_varify_error FROM :'copy_converse_varify_error_file'
 WITH(FORMAT text, delimiter '|', "null" E'\\N', newline 'LF', escape 'OFF')
 LOG ERRORS SEGMENT REJECT LIMIT 10 ROWS;
 SELECT * FROM copy_converse_varify_error;
 DROP TABLE copy_converse_varify_error;
 
+\set eol_on_next_raw_page_file :abs_srcdir '/data/eol_on_next_raw_page.data'
 CREATE TABLE copy_eol_on_nextrawpage(b text);
-COPY copy_eol_on_nextrawpage FROM '@abs_srcdir@/data/eol_on_next_raw_page.data'
+COPY copy_eol_on_nextrawpage FROM :'eol_on_next_raw_page_file'
 WITH(FORMAT text, delimiter '|', "null" E'\\N', newline 'LF', escape 'OFF')
 LOG ERRORS SEGMENT REJECT LIMIT 10 ROWS;
 SELECT count(*) FROM copy_eol_on_nextrawpage;
diff --git a/src/test/regress/sql/cbdb_parallel.sql b/src/test/regress/sql/cbdb_parallel.sql
index b3fab79dd09..e8e070adbab 100644
--- a/src/test/regress/sql/cbdb_parallel.sql
+++ b/src/test/regress/sql/cbdb_parallel.sql
@@ -31,7 +31,7 @@
 --  12  CdbLocusType_HashedWorkers
 --
 --
-set force_parallel_mode = 0;
+set debug_parallel_query=regress;
 set optimizer = off;
 
 create schema test_parallel;
diff --git a/src/test/regress/sql/directory_table.sql b/src/test/regress/sql/directory_table.sql
index 664673919c2..0a121831bb3 100644
--- a/src/test/regress/sql/directory_table.sql
+++ b/src/test/regress/sql/directory_table.sql
@@ -12,8 +12,11 @@ SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname
 SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname = 'gp_storage_server';
 SELECT relname, relisshared, relpersistence, relkind FROM pg_class WHERE relname = 'gp_storage_user_mapping';
 
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set testtablespace :abs_builddir '/testtablespace'
+
 -- CREATE TABLESPACE
-CREATE TABLESPACE directory_tblspc LOCATION '@testtablespace@';
+CREATE TABLESPACE directory_tblspc LOCATION :'testtablespace';
 
 -- CREATE DATABASE
 CREATE DATABASE dirtable_db;
@@ -306,84 +309,87 @@ for each statement execute procedure triggertest();
 SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1;
 SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1;
 
-\COPY dir_table1 FROM '@abs_srcdir@/data/nation.csv';    -- fail
-\COPY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation'; -- fail
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv';    -- fail
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation1';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation1'; -- fail
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation2' 'nation2'; -- fail
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation2';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation';    -- fail
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2';    -- fail
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2';    -- fail
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation6';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation7';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation8';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation9';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation10';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation11';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation12';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation13';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation14';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation15';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation16';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation17';
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation18';
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set nation_file :abs_srcdir '/data/nation.csv'
+\COPY dir_table1 FROM :'nation_file';    -- fail
+\COPY dir_table1 FROM :'nation_file' 'nation'; -- fail
+\COPY BINARY dir_table1 FROM :'nation_file';    -- fail
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation1';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation1'; -- fail
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation2' 'nation2'; -- fail
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation2';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation3' WITH TAG 'nation';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation3' WITH TAG 'nation';    -- fail
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation3' WITH TAG 'nation2';    -- fail
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation4' WITH TAG 'nation';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation5' WITH TAG 'nation' WITH TAG 'nation2';    -- fail
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation6';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation7';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation8';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation9';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation10';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation11';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation12';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation13';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation14';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation15';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation16';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation17';
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation18';
 SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1;
 SELECT relative_path, content FROM directory_table('dir_table1') ORDER BY 1;
 
-COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv';     -- fail
-COPY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation';  -- fail
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1';
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation1'; -- fail
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation2';
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation';
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation';    -- fail
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation3' WITH TAG 'nation2';    -- fail
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation4' WITH TAG 'nation';
-COPY BINARY dir_table2 FROM PROGRAM 'cat @abs_srcdir@/data/nation.csv' 'nation5' WITH TAG 'nation' WITH TAG 'nation2';    -- fail
+\set cat_nation_file 'cat ' :abs_srcdir '/data/nation.csv'
+COPY dir_table2 FROM PROGRAM :'cat_nation_file';     -- fail
+COPY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation';  -- fail
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation1';
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation1'; -- fail
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation2';
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation3' WITH TAG 'nation';
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation3' WITH TAG 'nation';    -- fail
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation3' WITH TAG 'nation2';    -- fail
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation4' WITH TAG 'nation';
+COPY BINARY dir_table2 FROM PROGRAM :'cat_nation_file' 'nation5' WITH TAG 'nation' WITH TAG 'nation2';    -- fail
 SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1;
 SELECT relative_path, content FROM directory_table('dir_table2') ORDER BY 1;
 
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation.txt';   -- OK
-COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation2.txt';   -- OK
-\COPY BINARY "abs.dir_table" FROM '@abs_srcdir@/data/nation.csv' 'aa.bb';    -- OK
-COPY BINARY "abs.dir_table" FROM '@abs_srcdir@/data/nation.csv' 'cc.dd';    -- OK
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation.txt';   -- OK
+COPY BINARY dir_table1 FROM :'nation_file' 'nation2.txt';   -- OK
+\COPY BINARY "abs.dir_table" FROM :'nation_file' 'aa.bb';    -- OK
+COPY BINARY "abs.dir_table" FROM :'nation_file' 'cc.dd';    -- OK
 
 -- Test copy binary from directory table
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (format CSV);
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (freeze off);
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (freeze on);
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (delimiter ',');
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (null ' ');
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (header off);
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (header on);
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (quote ':');
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (escape ':');
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_quote (a));
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_quote *);
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_not_null (a));
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_null (a));
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (convert_selectively (a));
-\COPY BINARY dir_table1 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (encoding 'sql_ascii');
-
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (format CSV);
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (freeze off);
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (freeze on);
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (delimiter ',');
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (null ' ');
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (header off);
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (header on);
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (quote ':');
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (escape ':');
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_quote (a));
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_quote *);
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_not_null (a));
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (force_null (a));
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (convert_selectively (a));
-COPY BINARY dir_table2 FROM '@abs_srcdir@/data/nation.csv' 'nation_failed' (encoding 'sql_ascii');
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (format CSV);
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (freeze off);
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (freeze on);
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (delimiter ',');
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (null ' ');
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (header off);
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (header on);
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (quote ':');
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (escape ':');
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (force_quote (a));
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (force_quote *);
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (force_not_null (a));
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (force_null (a));
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (convert_selectively (a));
+\COPY BINARY dir_table1 FROM :'nation_file' 'nation_failed' (encoding 'sql_ascii');
+
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (format CSV);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (freeze off);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (freeze on);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (delimiter ',');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (null ' ');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (header off);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (header on);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (quote ':');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (escape ':');
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (force_quote (a));
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (force_quote *);
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (force_not_null (a));
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (force_null (a));
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (convert_selectively (a));
+COPY BINARY dir_table2 FROM :'nation_file' 'nation_failed' (encoding 'sql_ascii');
 
 -- Test copy file content md5
 CREATE OR REPLACE FUNCTION file_content(text, text) RETURNS BYTEA LANGUAGE SQL AS
@@ -406,33 +412,44 @@ SELECT md5_equal('dir_table2', 'nation3');
 SELECT md5_equal('dir_table2', 'nation4');
 
 -- Test Copy To directory table
-\COPY dir_table1 TO '@abs_srcdir@/data/dir_table1';  -- fail
-\COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1';  -- fail
-COPY dir_table1 TO '@abs_srcdir@/data/dir_table1';  -- fail
-COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1';  -- fail
-\COPY dir_table2 TO '@abs_srcdir@/data/dir_table2';  -- fail
-\COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2';  -- fail
-COPY dir_table2 TO '@abs_srcdir@/data/dir_table2';  -- fail
-COPY BINARY dir_table2 TO '@abs_srcdir@/data/dir_table2';  -- fail
-\COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1';   -- fail
-COPY BINARY dir_table1 TO '@abs_srcdir@/data/dir_table1';   -- fail
-\COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1';   -- OK
-COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO '@abs_srcdir@/data/nation1';   -- OK
-\COPY BINARY DIRECTORY TABLE dir_table1 'unknown' TO '@abs_srcdir@/data/unknown';   -- OK
-COPY BINARY DIRECTORY TABLE dir_table1 'unknown' TO '@abs_srcdir@/data/unknown';    -- OK
+\set dir_table1_file :abs_srcdir '/data/dir_table1'
+\set dir_table12_file :abs_srcdir '/data/dir_table2'
+\set dir_nation1_file :abs_srcdir '/data/dir_nation1'
+\set dir_unknown_file :abs_srcdir '/data/dir_unknown'
+\set nation2_gz 'gzip -c -1 > ' :abs_srcdir '/data/nation2.gz'
+\COPY dir_table1 TO :'dir_table1_file';  -- fail
+\COPY BINARY dir_table1 TO :'dir_table1_file';  -- fail
+COPY dir_table1 TO :'dir_table1_file';  -- fail
+COPY BINARY dir_table1 TO :'dir_table1_file';  -- fail
+\COPY dir_table2 TO :'dir_table12_file';  -- fail
+\COPY BINARY dir_table2 TO :'dir_table12_file';  -- fail
+COPY dir_table2 TO :'dir_table12_file';  -- fail
+COPY BINARY dir_table2 TO :'dir_table12_file';  -- fail
+\COPY BINARY dir_table1 TO :'dir_table1_file';   -- fail
+COPY BINARY dir_table1 TO :'dir_table1_file';   -- fail
+\COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO :'dir_nation1_file';   -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation1' TO :'dir_nation1_file';   -- OK
+\COPY BINARY DIRECTORY TABLE dir_table1 'unknown' TO :'dir_unknown_file';   -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'unknown' TO :'dir_unknown_file';    -- OK
 \COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK
 COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdin; -- OK
 \COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK
 COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO stdout; -- OK
-\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz';   -- OK
-COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM 'gzip -c -1 > @abs_srcdir@/data/nation2.gz';   -- OK
-
-\COPY BINARY DIRECTORY TABLE "abs.dir_table" 'aa.bb' TO 
'@abs_srcdir@/data/aa.bb';   -- OK
-COPY BINARY DIRECTORY TABLE "abs.dir_table" 'cc.dd' TO 
'@abs_srcdir@/data/cc.dd';    -- OK
-\COPY BINARY DIRECTORY TABLE dir_table1 'nation.txt' TO 
'@abs_srcdir@/data/nation.txt'; -- OK
-COPY BINARY DIRECTORY TABLE dir_table1 'nation2.txt' TO 
'@abs_srcdir@/data/nation2.txt'; -- OK
-\COPY BINARY DIRECTORY TABLE public.dir_table1 'nation.txt' TO 
'@abs_srcdir@/data/nation3.txt'; -- OK
-COPY BINARY DIRECTORY TABLE public.dir_table1 'nation2.txt' TO 
'@abs_srcdir@/data/nation4.txt'; -- OK
+\COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM :'nation2_gz';   -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation2' TO PROGRAM :'nation2_gz';   -- OK
+
+\set aa_bb_file :abs_srcdir '/data/aa.bb'
+\set cc_dd_file :abs_srcdir '/data/cc.dd'
+\set nation_txt_file :abs_srcdir '/data/nation.txt'
+\set nation2_txt_file :abs_srcdir '/data/nation2.txt'
+\set nation3_txt_file :abs_srcdir '/data/nation3.txt'
+\set nation4_txt_file :abs_srcdir '/data/nation4.txt'
+\COPY BINARY DIRECTORY TABLE "abs.dir_table" 'aa.bb' TO :'aa_bb_file';   -- OK
+COPY BINARY DIRECTORY TABLE "abs.dir_table" 'cc.dd' TO :'cc_dd_file';    -- OK
+\COPY BINARY DIRECTORY TABLE dir_table1 'nation.txt' TO :'nation_txt_file'; -- OK
+COPY BINARY DIRECTORY TABLE dir_table1 'nation2.txt' TO :'nation2_txt_file'; -- OK
+\COPY BINARY DIRECTORY TABLE public.dir_table1 'nation.txt' TO :'nation3_txt_file'; -- OK
+COPY BINARY DIRECTORY TABLE public.dir_table1 'nation2.txt' TO :'nation4_txt_file'; -- OK
 
 
 SELECT relative_path, size, tag FROM dir_table1 ORDER BY 1;
@@ -521,8 +538,8 @@ SELECT relative_path, size, tag FROM dir_table2 ORDER BY 1;
 CREATE DIRECTORY TABLE dir_table4 TABLESPACE directory_tblspc;
 
 BEGIN;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_commit';
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_commit2' WITH TAG 'nation';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_commit';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_commit2' WITH TAG 'nation';
 
 COMMIT;
 SELECT relative_path, content FROM directory_table('dir_table4') ORDER BY 1;
@@ -542,7 +559,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 -- Test transaction rollback of directory table manipulation
 
 BEGIN;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_rollback';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_rollback';
 SELECT relative_path, content FROM directory_table('dir_table4') ORDER BY 1;
 ROLLBACK;
 SELECT relative_path, content FROM directory_table('dir_table4') ORDER BY 1;
@@ -555,7 +572,7 @@ SELECT relative_path, content FROM directory_table('dir_table4') ORDER BY 1;
 
 BEGIN;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_rollback2' WITH TAG 'nation';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_rollback2' WITH TAG 'nation';
 UPDATE dir_table4 SET tag = 'nation_updated' WHERE relative_path = 'nation_rollback2';
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 ROLLBACK;
@@ -564,13 +581,13 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 -- Test subtransaction commit of directory table manipulation
 BEGIN;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit' WITH TAG 'nation';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit' WITH TAG 'nation';
 SAVEPOINT s1;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit2';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit2';
 SAVEPOINT s2;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit3';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit3';
 RELEASE SAVEPOINT s1;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 COMMIT;
@@ -581,7 +598,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 SELECT remove_file('dir_table4', 'nation_subcommit');
 SAVEPOINT s1;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit';
 SAVEPOINT s2;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 RELEASE SAVEPOINT s1;
@@ -605,18 +622,18 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 SELECT remove_file('dir_table4', 'nation_subcommit2');
 SAVEPOINT s1;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subcommit4';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subcommit4';
 SAVEPOINT s2;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 ROLLBACK TO SAVEPOINT s1;
 COMMIT;
 
 -- Test subtransaction rollback of directory table manipulation
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback1';
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback2';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback1';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback2';
 BEGIN;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback3';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback3';
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 SAVEPOINT s1;
 SELECT remove_file('dir_table4', 'nation_subrollback1');
@@ -625,7 +642,7 @@ SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 ROLLBACK;
 
 BEGIN;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback4';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback4';
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 SAVEPOINT s1;
 SELECT remove_file('dir_table4', 'nation_subrollback4');
@@ -636,14 +653,14 @@ ROLLBACK;
 
 BEGIN;
 SELECT remove_file('dir_table4', 'nation_subrollback2');
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback5';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback5';
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 SAVEPOINT s1;
 SELECT remove_file('dir_table4', 'nation_subrollback5');
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 ROLLBACK TO SAVEPOINT s1;
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
-COPY BINARY dir_table4 FROM '@abs_srcdir@/data/nation.csv' 'nation_subrollback6';
+COPY BINARY dir_table4 FROM :'nation_file' 'nation_subrollback6';
 SELECT relative_path, tag FROM dir_table4 ORDER BY 1;
 SAVEPOINT s2;
 ROLLBACK;
diff --git a/src/test/regress/sql/dropdb_check_shared_buffer_cache.sql b/src/test/regress/sql/dropdb_check_shared_buffer_cache.sql
index c1b4a60a938..e77930988a0 100644
--- a/src/test/regress/sql/dropdb_check_shared_buffer_cache.sql
+++ b/src/test/regress/sql/dropdb_check_shared_buffer_cache.sql
@@ -1,7 +1,8 @@
 -- Test that dropping a database will drop pages in the shared buffer cache.
-
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set regress_dll :abs_srcdir '/regress.so'
 CREATE OR REPLACE FUNCTION check_shared_buffer_cache_for_dboid(Oid) RETURNS BOOL
-AS '@abs_srcdir@/regress.so', 'check_shared_buffer_cache_for_dboid'
+AS :'regress_dll', 'check_shared_buffer_cache_for_dboid'
 LANGUAGE C;
 
 -- Create a new database and a table. This should create entries in the shared
diff --git a/src/test/regress/sql/hooktest.sql b/src/test/regress/sql/hooktest.sql
index 5f3f752230b..e86db9b089e 100644
--- a/src/test/regress/sql/hooktest.sql
+++ b/src/test/regress/sql/hooktest.sql
@@ -1,4 +1,6 @@
-LOAD '@abs_builddir@/hooktest/test_hook@DLSUFFIX@';
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set test_hook :abs_builddir '/hooktest/test_hook.so'
+LOAD :'test_hook';
 -----------------------------------
 -- Test planner hook
 -----------------------------------
diff --git a/src/test/regress/sql/part_external_table.sql b/src/test/regress/sql/part_external_table.sql
index 3c80981ed83..aba05874964 100644
--- a/src/test/regress/sql/part_external_table.sql
+++ b/src/test/regress/sql/part_external_table.sql
@@ -141,9 +141,9 @@ alter table part add partition exch1 start(60) end (70);
 alter table part add partition exch2 start(70) end (80);
 
 -- exchange with external tables
-\set part-ext_file 'cat > ' :abs_srcdir '/data/part-ext.csv'
-create external web table p3_e (a int, b int) execute :'part-ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
-create writable external web table p4_e (a int, b int) execute :'part-ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+\set part_ext_file 'cat > ' :abs_srcdir '/data/part-ext.csv'
+create external web table p3_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create writable external web table p4_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
 
 -- allow exchange readable external table
 alter table part exchange partition exch1 with table p3_e;
@@ -163,8 +163,8 @@ OPTIONS ( filename '/does/not/exist.csv', format 'csv');
 alter table part exchange partition exch2 with table ft3;
 
 -- same tests for attach partition
-create external web table p5_e (a int, b int) execute :'part-ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
-create writable external web table p6_e (a int, b int) execute :'part-ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create external web table p5_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
+create writable external web table p6_e (a int, b int) execute :'part_ext_file' format 'csv' (delimiter as '|' null as 'null' escape as ' ');
 
 -- allow attach readable external table
 alter table part attach partition p5_e for values from (80) to (90);
diff --git a/src/test/regress/sql/qp_regexp.sql b/src/test/regress/sql/qp_regexp.sql
index 09091705c86..99f87f7f7fe 100644
--- a/src/test/regress/sql/qp_regexp.sql
+++ b/src/test/regress/sql/qp_regexp.sql
@@ -25,7 +25,9 @@ phone_num bigint
 )
 distributed by (lname);
 
-\copy public.phone_book from '@abs_srcdir@/data/phone_book.txt' delimiter as '|'
+\getenv abs_srcdir PG_ABS_SRCDIR
+\set phone_book_file :abs_srcdir '/data/phone_book.txt'
+\copy public.phone_book from :'phone_book_file' delimiter as '|'
 
 drop table if exists phone_book_substr;
 
diff --git a/src/test/regress/sql/query_info_hook_test.sql b/src/test/regress/sql/query_info_hook_test.sql
index bc42af0c41c..7f6f120b737 100644
--- a/src/test/regress/sql/query_info_hook_test.sql
+++ b/src/test/regress/sql/query_info_hook_test.sql
@@ -1,4 +1,6 @@
-LOAD '@abs_builddir@/query_info_hook_test/query_info_hook_test@DLSUFFIX@';
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set query_info_hook_test_dll :abs_builddir '/query_info_hook_test/query_info_hook_test.so'
+LOAD :'query_info_hook_test_dll';
 SET client_min_messages='warning';
 SET optimizer=off;
 
diff --git a/src/test/regress/sql/session_reset.sql b/src/test/regress/sql/session_reset.sql
index d40433c64db..d78ff4d7dc1 100644
--- a/src/test/regress/sql/session_reset.sql
+++ b/src/test/regress/sql/session_reset.sql
@@ -1,8 +1,9 @@
 
 set log_min_messages to ERROR;
-
+\getenv abs_builddir PG_ABS_BUILDDIR
+\set regress_dll :abs_builddir '/regress.so'
 CREATE OR REPLACE FUNCTION gp_execute_on_server(content int, query text) returns text
-language C as '@abs_builddir@/regress@DLSUFFIX@', 'gp_execute_on_server';
+language C as :'regress_dll', 'gp_execute_on_server';
 
 -- terminate backend process for this session on segment with content ID = 0
 select gp_execute_on_server(0, 'select pg_terminate_backend(pg_backend_pid())');
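
The hunks above all apply the same conversion: @abs_srcdir@ / @abs_builddir@ / @DLSUFFIX@ substitution markers are replaced by psql's \getenv plus multi-argument \set concatenation, with the resulting variable interpolated as a quoted literal via :'var'. A minimal sketch of that pattern, assuming pg_regress exports PG_ABS_SRCDIR; the table and data file names are placeholders, not part of this commit:

    -- Build an absolute path from the environment, then quote it for COPY.
    \getenv abs_srcdir PG_ABS_SRCDIR
    \set example_file :abs_srcdir '/data/example.csv'
    -- :'example_file' expands to the concatenated path as a single-quoted literal.
    COPY example_table FROM :'example_file' WITH (FORMAT csv);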

