On Mon Jan 26, 2026 at 5:42 PM CET, Andres Freund wrote:
I agree that what we currently do is pretty unhelpful.

Attached is v2, which I think fixes all the issues that you mentioned.
From 27d2ab668c2015f25d63a432c1ebd04f2e85afff Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <[email protected]>
Date: Mon, 26 Jan 2026 00:16:30 +0100
Subject: [PATCH v2 5/5] Fail some tests

---
 src/test/authentication/t/001_password.pl     |   2 +-
 .../modules/libpq_pipeline/libpq_pipeline.c   |   2 +-
 .../expected/test_cplusplusext.out            |   2 +-
 src/test/regress/expected/oid8.out            |   2 +-
 src/test/regress/sql/drop_if_exists.sql       |   2 -
 src/test/regress/sql/select_parallel.sql      | 535 ------------------
 src/test/regress/sql/write_parallel.sql       |  22 -
 7 files changed, 4 insertions(+), 563 deletions(-)

diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl
index f4d65ba7bae..27b1132ed45 100644
--- a/src/test/authentication/t/001_password.pl
+++ b/src/test/authentication/t/001_password.pl
@@ -144,7 +144,7 @@ is( $node->psql(
 $node->safe_psql(
 	'postgres',
 	"CREATE TABLE sysuser_data (n) AS SELECT NULL FROM generate_series(1, 10);
-	 GRANT ALL ON sysuser_data TO scram_role;");
+	 GRANT ALL ON sysuser_data TO scram_role '");
 $ENV{"PGPASSWORD"} = 'pass';
 
 # Create a role that contains a comma to stress the parsing.
diff --git a/src/test/modules/libpq_pipeline/libpq_pipeline.c b/src/test/modules/libpq_pipeline/libpq_pipeline.c
index ce1a9995f46..c94ee45346f 100644
--- a/src/test/modules/libpq_pipeline/libpq_pipeline.c
+++ b/src/test/modules/libpq_pipeline/libpq_pipeline.c
@@ -194,7 +194,7 @@ wait_for_connection_state(int line, PGconn *monitorConn, int procpid,
 
 		if (PQresultStatus(res) != PGRES_TUPLES_OK)
 			pg_fatal_impl(line, "could not query pg_stat_activity: %s", PQerrorMessage(monitorConn));
-		if (PQntuples(res) != 1)
+		if (PQntuples(res) == 1)
 			pg_fatal_impl(line, "unexpected number of rows received: %d", PQntuples(res));
 		if (PQnfields(res) != 1)
 			pg_fatal_impl(line, "unexpected number of columns received: %d", PQnfields(res));
diff --git a/src/test/modules/test_cplusplusext/expected/test_cplusplusext.out b/src/test/modules/test_cplusplusext/expected/test_cplusplusext.out
index ab0b04b5c5e..243fe0c2652 100644
--- a/src/test/modules/test_cplusplusext/expected/test_cplusplusext.out
+++ b/src/test/modules/test_cplusplusext/expected/test_cplusplusext.out
@@ -2,6 +2,6 @@ CREATE EXTENSION test_cplusplusext;
 SELECT test_cplusplus_add(1, 2);
  test_cplusplus_add 
 --------------------
-                  3
+                  9
 (1 row)
 
diff --git a/src/test/regress/expected/oid8.out b/src/test/regress/expected/oid8.out
index 2e114f1ce70..3711a116aae 100644
--- a/src/test/regress/expected/oid8.out
+++ b/src/test/regress/expected/oid8.out
@@ -6,7 +6,7 @@ INSERT INTO OID8_TBL(f1) VALUES ('1234');
 INSERT INTO OID8_TBL(f1) VALUES ('1235');
 INSERT INTO OID8_TBL(f1) VALUES ('987');
 INSERT INTO OID8_TBL(f1) VALUES ('-1040');
-INSERT INTO OID8_TBL(f1) VALUES ('99999999');
+INSERT INTO OID8_TBL(f1) VALUES ('88888888');
 INSERT INTO OID8_TBL(f1) VALUES ('5     ');
 INSERT INTO OID8_TBL(f1) VALUES ('   10  ');
 INSERT INTO OID8_TBL(f1) VALUES ('123456789012345678');
diff --git a/src/test/regress/sql/drop_if_exists.sql b/src/test/regress/sql/drop_if_exists.sql
index ac6168b91f8..db4c95f8ed3 100644
--- a/src/test/regress/sql/drop_if_exists.sql
+++ b/src/test/regress/sql/drop_if_exists.sql
@@ -286,8 +286,6 @@ DROP FUNCTION test_ambiguous_funcname(text);
 -- Likewise for procedures.
 CREATE PROCEDURE test_ambiguous_procname(int) as $$ begin end; $$ language plpgsql;
 CREATE PROCEDURE test_ambiguous_procname(text) as $$ begin end; $$ language plpgsql;
-DROP PROCEDURE test_ambiguous_procname;
-DROP PROCEDURE IF EXISTS test_ambiguous_procname;
 
 -- Check we get a similar error if we use ROUTINE instead of PROCEDURE.
 DROP ROUTINE IF EXISTS test_ambiguous_procname;
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index 71a75bc86ea..4c3adb8bdd8 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -53,538 +53,3 @@ explain (costs off)
   select round(avg(aa)), sum(aa) from a_star;
 select round(avg(aa)), sum(aa) from a_star a4;
 reset enable_parallel_append;
-
--- Parallel Append that runs serially
-create function sp_test_func() returns setof text as
-$$ select 'foo'::varchar union all select 'bar'::varchar $$
-language sql stable;
-select sp_test_func() order by 1;
-
--- Parallel Append is not to be used when the subpath depends on the outer param
-create table part_pa_test(a int, b int) partition by range(a);
-create table part_pa_test_p1 partition of part_pa_test for values from (minvalue) to (0);
-create table part_pa_test_p2 partition of part_pa_test for values from (0) to (maxvalue);
-explain (costs off)
-	select (select max((select pa1.b from part_pa_test pa1 where pa1.a = pa2.a)))
-	from part_pa_test pa2;
-drop table part_pa_test;
-
--- test with leader participation disabled
-set parallel_leader_participation = off;
-explain (costs off)
-  select count(*) from tenk1 where stringu1 = 'GRAAAA';
-select count(*) from tenk1 where stringu1 = 'GRAAAA';
-
--- test with leader participation disabled, but no workers available (so
--- the leader will have to run the plan despite the setting)
-set max_parallel_workers = 0;
-explain (costs off)
-  select count(*) from tenk1 where stringu1 = 'GRAAAA';
-select count(*) from tenk1 where stringu1 = 'GRAAAA';
-
-reset max_parallel_workers;
-reset parallel_leader_participation;
-
--- test that parallel_restricted function doesn't run in worker
-alter table tenk1 set (parallel_workers = 4);
-explain (verbose, costs off)
-select sp_parallel_restricted(unique1) from tenk1
-  where stringu1 = 'GRAAAA' order by 1;
-
--- test parallel plan when group by expression is in target list.
-explain (costs off)
-	select length(stringu1) from tenk1 group by length(stringu1);
-select length(stringu1) from tenk1 group by length(stringu1);
-
-explain (costs off)
-	select stringu1, count(*) from tenk1 group by stringu1 order by stringu1;
-
--- test that parallel plan for aggregates is not selected when
--- target list contains parallel restricted clause.
-explain (costs off)
-	select  sum(sp_parallel_restricted(unique1)) from tenk1
-	group by(sp_parallel_restricted(unique1));
-
--- test prepared statement
-prepare tenk1_count(integer) As select  count((unique1)) from tenk1 where hundred > $1;
-explain (costs off) execute tenk1_count(1);
-execute tenk1_count(1);
-deallocate tenk1_count;
-
--- test parallel plans for queries containing un-correlated subplans.
-alter table tenk2 set (parallel_workers = 0);
-explain (costs off)
-	select count(*) from tenk1 where (two, four) not in
-	(select hundred, thousand from tenk2 where thousand > 100);
-select count(*) from tenk1 where (two, four) not in
-	(select hundred, thousand from tenk2 where thousand > 100);
--- this is not parallel-safe due to use of random() within SubLink's testexpr:
-explain (costs off)
-	select * from tenk1 where (unique1 + random())::integer not in
-	(select ten from tenk2);
-alter table tenk2 reset (parallel_workers);
-
--- test parallel plan for a query containing initplan.
-set enable_indexscan = off;
-set enable_indexonlyscan = off;
-set enable_bitmapscan = off;
-alter table tenk2 set (parallel_workers = 2);
-
-explain (costs off)
-	select count(*) from tenk1
-        where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2);
-select count(*) from tenk1
-    where tenk1.unique1 = (Select max(tenk2.unique1) from tenk2);
-
-reset enable_indexscan;
-reset enable_indexonlyscan;
-reset enable_bitmapscan;
-alter table tenk2 reset (parallel_workers);
-
--- test parallel index scans.
-set enable_seqscan to off;
-set enable_bitmapscan to off;
-set random_page_cost = 2;
-
-explain (costs off)
-	select  count((unique1)) from tenk1 where hundred > 1;
-select  count((unique1)) from tenk1 where hundred > 1;
-
--- Parallel ScalarArrayOp index scan
-explain (costs off)
-  select count((unique1)) from tenk1
-  where hundred = any ((select array_agg(i) from generate_series(1, 100, 15) i)::int[]);
-select count((unique1)) from tenk1
-where hundred = any ((select array_agg(i) from generate_series(1, 100, 15) i)::int[]);
-
--- test parallel index-only scans.
-explain (costs off)
-	select  count(*) from tenk1 where thousand > 95;
-select  count(*) from tenk1 where thousand > 95;
-
--- test rescan cases too
-set enable_material = false;
-
-explain (costs off)
-select * from
-  (select count(unique1) from tenk1 where hundred > 10) ss
-  right join (values (1),(2),(3)) v(x) on true;
-select * from
-  (select count(unique1) from tenk1 where hundred > 10) ss
-  right join (values (1),(2),(3)) v(x) on true;
-
-explain (costs off)
-select * from
-  (select count(*) from tenk1 where thousand > 99) ss
-  right join (values (1),(2),(3)) v(x) on true;
-select * from
-  (select count(*) from tenk1 where thousand > 99) ss
-  right join (values (1),(2),(3)) v(x) on true;
-
--- test rescans for a Limit node with a parallel node beneath it.
-reset enable_seqscan;
-set enable_indexonlyscan to off;
-set enable_indexscan to off;
-alter table tenk1 set (parallel_workers = 0);
-alter table tenk2 set (parallel_workers = 1);
-explain (costs off)
-select count(*) from tenk1
-  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
-  on tenk1.unique1 < ss.unique1 + 1
-  where tenk1.unique1 < 2;
-select count(*) from tenk1
-  left join (select tenk2.unique1 from tenk2 order by 1 limit 1000) ss
-  on tenk1.unique1 < ss.unique1 + 1
-  where tenk1.unique1 < 2;
---reset the value of workers for each table as it was before this test.
-alter table tenk1 set (parallel_workers = 4);
-alter table tenk2 reset (parallel_workers);
-
-reset enable_material;
-reset enable_bitmapscan;
-reset enable_indexonlyscan;
-reset enable_indexscan;
-
--- test parallel bitmap heap scan.
-set enable_seqscan to off;
-set enable_indexscan to off;
-set enable_hashjoin to off;
-set enable_mergejoin to off;
-set enable_material to off;
--- test prefetching, if the platform allows it
-DO $$
-BEGIN
- SET effective_io_concurrency = 50;
-EXCEPTION WHEN invalid_parameter_value THEN
-END $$;
-set work_mem='64kB';  --set small work mem to force lossy pages
-explain (costs off)
-	select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0;
-select count(*) from tenk1, tenk2 where tenk1.hundred > 1 and tenk2.thousand=0;
-
-create table bmscantest (a int, t text);
-insert into bmscantest select r, 'fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' FROM generate_series(1,100000) r;
-create index i_bmtest ON bmscantest(a);
-select count(*) from bmscantest where a>1;
-
--- test accumulation of stats for parallel nodes
-reset enable_seqscan;
-alter table tenk2 set (parallel_workers = 0);
-explain (analyze, timing off, summary off, costs off, buffers off)
-   select count(*) from tenk1, tenk2 where tenk1.hundred > 1
-        and tenk2.thousand=0;
-alter table tenk2 reset (parallel_workers);
-
-reset work_mem;
-create function explain_parallel_sort_stats() returns setof text
-language plpgsql as
-$$
-declare ln text;
-begin
-    for ln in
-        explain (analyze, timing off, summary off, costs off, buffers off)
-          select * from
-          (select ten from tenk1 where ten < 100 order by ten) ss
-          right join (values (1),(2),(3)) v(x) on true
-    loop
-        ln := regexp_replace(ln, 'Memory: \S*',  'Memory: xxx');
-        return next ln;
-    end loop;
-end;
-$$;
-select * from explain_parallel_sort_stats();
-
-reset enable_indexscan;
-reset enable_hashjoin;
-reset enable_mergejoin;
-reset enable_material;
-reset effective_io_concurrency;
-drop table bmscantest;
-drop function explain_parallel_sort_stats();
-
--- test parallel merge join path.
-set enable_hashjoin to off;
-set enable_nestloop to off;
-
-explain (costs off)
-	select  count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1;
-select  count(*) from tenk1, tenk2 where tenk1.unique1 = tenk2.unique1;
-
-reset enable_hashjoin;
-reset enable_nestloop;
-
--- test parallel nestloop join path with materialization of the inner path
-alter table tenk2 set (parallel_workers = 0);
-explain (costs off)
-select * from tenk1 t1, tenk2 t2 where t1.two > t2.two;
-
--- test that parallel nestloop join is not generated if the inner path is
--- not parallel-safe
-explain (costs off)
-select * from tenk1 t1
-    left join lateral
-      (select t1.unique1 as x, * from tenk2 t2 order by 1) t2
-    on true
-where t1.two > t2.two;
-alter table tenk2 reset (parallel_workers);
-
--- test gather merge
-set enable_hashagg = false;
-
-explain (costs off)
-   select count(*) from tenk1 group by twenty;
-
-select count(*) from tenk1 group by twenty;
-
---test expressions in targetlist are pushed down for gather merge
-create function sp_simple_func(var1 integer) returns integer
-as $$
-begin
-        return var1 + 10;
-end;
-$$ language plpgsql PARALLEL SAFE;
-
-explain (costs off, verbose)
-    select ten, sp_simple_func(ten) from tenk1 where ten < 100 order by ten;
-
-drop function sp_simple_func(integer);
-
--- test handling of SRFs in targetlist (bug in 10.0)
-
-explain (costs off)
-   select count(*), generate_series(1,2) from tenk1 group by twenty;
-
-select count(*), generate_series(1,2) from tenk1 group by twenty;
-
--- test gather merge with parallel leader participation disabled
-set parallel_leader_participation = off;
-
-explain (costs off)
-   select count(*) from tenk1 group by twenty;
-
-select count(*) from tenk1 group by twenty;
-
-reset parallel_leader_participation;
-
---test rescan behavior of gather merge
-set enable_material = false;
-
-explain (costs off)
-select * from
-  (select string4, count(unique2)
-   from tenk1 group by string4 order by string4) ss
-  right join (values (1),(2),(3)) v(x) on true;
-
-select * from
-  (select string4, count(unique2)
-   from tenk1 group by string4 order by string4) ss
-  right join (values (1),(2),(3)) v(x) on true;
-
-reset enable_material;
-
-reset enable_hashagg;
-
--- check parallelized int8 aggregate (bug #14897)
-explain (costs off)
-select avg(unique1::int8) from tenk1;
-
-select avg(unique1::int8) from tenk1;
-
--- gather merge test with a LIMIT
-explain (costs off)
-  select fivethous from tenk1 order by fivethous limit 4;
-
-select fivethous from tenk1 order by fivethous limit 4;
-
--- gather merge test with 0 worker
-set max_parallel_workers = 0;
-explain (costs off)
-   select string4 from tenk1 order by string4 limit 5;
-select string4 from tenk1 order by string4 limit 5;
-
--- gather merge test with 0 workers, with parallel leader
--- participation disabled (the leader will have to run the plan
--- despite the setting)
-set parallel_leader_participation = off;
-explain (costs off)
-   select string4 from tenk1 order by string4 limit 5;
-select string4 from tenk1 order by string4 limit 5;
-
-reset parallel_leader_participation;
-reset max_parallel_workers;
-
-create function parallel_safe_volatile(a int) returns int as
-  $$ begin return a; end; $$ parallel safe volatile language plpgsql;
-
--- Test gather merge atop of a sort of a partial path
-explain (costs off)
-select * from tenk1 where four = 2
-order by four, hundred, parallel_safe_volatile(thousand);
-
--- Test gather merge atop of an incremental sort a of partial path
-set min_parallel_index_scan_size = 0;
-set enable_seqscan = off;
-
-explain (costs off)
-select * from tenk1 where four = 2
-order by four, hundred, parallel_safe_volatile(thousand);
-
-reset min_parallel_index_scan_size;
-reset enable_seqscan;
-
--- Test GROUP BY with a gather merge path atop of a sort of a partial path
-explain (costs off)
-select count(*) from tenk1
-group by twenty, parallel_safe_volatile(two);
-
-drop function parallel_safe_volatile(int);
-
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-explain (costs off)
-  select stringu1::int2 from tenk1 where unique1 = 1;
-ROLLBACK TO SAVEPOINT settings;
-
--- exercise record typmod remapping between backends
-CREATE FUNCTION make_record(n int)
-  RETURNS RECORD LANGUAGE plpgsql PARALLEL SAFE AS
-$$
-BEGIN
-  RETURN CASE n
-           WHEN 1 THEN ROW(1)
-           WHEN 2 THEN ROW(1, 2)
-           WHEN 3 THEN ROW(1, 2, 3)
-           WHEN 4 THEN ROW(1, 2, 3, 4)
-           ELSE ROW(1, 2, 3, 4, 5)
-         END;
-END;
-$$;
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-SELECT make_record(x) FROM (SELECT generate_series(1, 5) x) ss ORDER BY x;
-ROLLBACK TO SAVEPOINT settings;
-DROP function make_record(n int);
-
--- test the sanity of parallel query after the active role is dropped.
-drop role if exists regress_parallel_worker;
-create role regress_parallel_worker;
-set role regress_parallel_worker;
-reset session authorization;
-drop role regress_parallel_worker;
-set debug_parallel_query = 1;
-select count(*) from tenk1;
-reset debug_parallel_query;
-reset role;
-
--- Window function calculation can't be pushed to workers.
-explain (costs off, verbose)
-  select count(*) from tenk1 a where (unique1, two) in
-    (select unique1, row_number() over() from tenk1 b);
-
-
--- LIMIT/OFFSET within sub-selects can't be pushed to workers.
-explain (costs off)
-  select * from tenk1 a where two in
-    (select two from tenk1 b where stringu1 like '%AAAA' limit 3);
-
--- to increase the parallel query test coverage
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-EXPLAIN (analyze, timing off, summary off, costs off, buffers off) SELECT * FROM tenk1;
-ROLLBACK TO SAVEPOINT settings;
-
--- provoke error in worker
--- (make the error message long enough to require multiple bufferloads)
-SAVEPOINT settings;
-SET LOCAL debug_parallel_query = 1;
-select (stringu1 || repeat('abcd', 5000))::int2 from tenk1 where unique1 = 1;
-ROLLBACK TO SAVEPOINT settings;
-
--- test interaction with set-returning functions
-SAVEPOINT settings;
-
--- multiple subqueries under a single Gather node
--- must set parallel_setup_cost > 0 to discourage multiple Gather nodes
-SET LOCAL parallel_setup_cost = 10;
-EXPLAIN (COSTS OFF)
-SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1
-UNION ALL
-SELECT unique1 FROM tenk1 WHERE fivethous = tenthous + 1;
-ROLLBACK TO SAVEPOINT settings;
-
--- can't use multiple subqueries under a single Gather node due to initPlans
-EXPLAIN (COSTS OFF)
-SELECT unique1 FROM tenk1 WHERE fivethous =
-	(SELECT unique1 FROM tenk1 WHERE fivethous = 1 LIMIT 1)
-UNION ALL
-SELECT unique1 FROM tenk1 WHERE fivethous =
-	(SELECT unique2 FROM tenk1 WHERE fivethous = 1 LIMIT 1)
-ORDER BY 1;
-
--- test interaction with SRFs
-SELECT * FROM information_schema.foreign_data_wrapper_options
-ORDER BY 1, 2, 3;
-
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT generate_series(1, two), array(select generate_series(1, two))
-  FROM tenk1 ORDER BY tenthous;
-
--- must disallow pushing sort below gather when pathkey contains an SRF
-EXPLAIN (VERBOSE, COSTS OFF)
-SELECT unnest(ARRAY[]::integer[]) + 1 AS pathkey
-  FROM tenk1 t1 JOIN tenk1 t2 ON TRUE
-  ORDER BY pathkey;
-
--- test passing expanded-value representations to workers
-CREATE FUNCTION make_some_array(int,int) returns int[] as
-$$declare x int[];
-  begin
-    x[1] := $1;
-    x[2] := $2;
-    return x;
-  end$$ language plpgsql parallel safe;
-CREATE TABLE fooarr(f1 text, f2 int[], f3 text);
-INSERT INTO fooarr VALUES('1', ARRAY[1,2], 'one');
-
-PREPARE pstmt(text, int[]) AS SELECT * FROM fooarr WHERE f1 = $1 AND f2 = $2;
-EXPLAIN (COSTS OFF) EXECUTE pstmt('1', make_some_array(1,2));
-EXECUTE pstmt('1', make_some_array(1,2));
-DEALLOCATE pstmt;
-
--- test interaction between subquery and partial_paths
-CREATE VIEW tenk1_vw_sec WITH (security_barrier) AS SELECT * FROM tenk1;
-EXPLAIN (COSTS OFF)
-SELECT 1 FROM tenk1_vw_sec
-  WHERE (SELECT sum(f1) FROM int4_tbl WHERE f1 < unique1) < 100;
-
-rollback;
-
--- test that a newly-created session role propagates to workers.
-begin;
-create role regress_parallel_worker;
-set session authorization regress_parallel_worker;
-select current_setting('session_authorization');
-set debug_parallel_query = 1;
-select current_setting('session_authorization');
-rollback;
-
--- test that function option SET ROLE works in parallel workers.
-create role regress_parallel_worker;
-
-create function set_and_report_role() returns text as
-  $$ select current_setting('role') $$ language sql parallel safe
-  set role = regress_parallel_worker;
-
-create function set_role_and_error(int) returns int as
-  $$ select 1 / $1 $$ language sql parallel safe
-  set role = regress_parallel_worker;
-
-set debug_parallel_query = 0;
-select set_and_report_role();
-select set_role_and_error(0);
-set debug_parallel_query = 1;
-select set_and_report_role();
-select set_role_and_error(0);
-reset debug_parallel_query;
-
-drop function set_and_report_role();
-drop function set_role_and_error(int);
-drop role regress_parallel_worker;
-
--- don't freeze in ParallelFinish while holding an LWLock
-BEGIN;
-
-CREATE FUNCTION my_cmp (int4, int4)
-RETURNS int LANGUAGE sql AS
-$$
-	SELECT
-		CASE WHEN $1 < $2 THEN -1
-				WHEN $1 > $2 THEN  1
-				ELSE 0
-		END;
-$$;
-
-CREATE TABLE parallel_hang (i int4);
-INSERT INTO parallel_hang
-	(SELECT * FROM generate_series(1, 400) gs);
-
-CREATE OPERATOR CLASS int4_custom_ops FOR TYPE int4 USING btree AS
-	OPERATOR 1 < (int4, int4), OPERATOR 2 <= (int4, int4),
-	OPERATOR 3 = (int4, int4), OPERATOR 4 >= (int4, int4),
-	OPERATOR 5 > (int4, int4), FUNCTION 1 my_cmp(int4, int4);
-
-CREATE UNIQUE INDEX parallel_hang_idx
-					ON parallel_hang
-					USING btree (i int4_custom_ops);
-
-SET debug_parallel_query = on;
-DELETE FROM parallel_hang WHERE 380 <= i AND i <= 420;
-
-ROLLBACK;
-
--- Check parallel worker stats
-select pg_stat_force_next_flush();
-select parallel_workers_to_launch > :'parallel_workers_to_launch_before'  AS wrk_to_launch,
-       parallel_workers_launched > :'parallel_workers_launched_before' AS wrk_launched
-  from pg_stat_database
-  where datname = current_database();
diff --git a/src/test/regress/sql/write_parallel.sql b/src/test/regress/sql/write_parallel.sql
index ae660dc2265..1b9b4c04fdd 100644
--- a/src/test/regress/sql/write_parallel.sql
+++ b/src/test/regress/sql/write_parallel.sql
@@ -19,25 +19,3 @@ explain (costs off) create table parallel_write as
 create table parallel_write as
     select length(stringu1) from tenk1 group by length(stringu1);
 drop table parallel_write;
-
-explain (costs off) select length(stringu1) into parallel_write
-    from tenk1 group by length(stringu1);
-select length(stringu1) into parallel_write
-    from tenk1 group by length(stringu1);
-drop table parallel_write;
-
-explain (costs off) create materialized view parallel_mat_view as
-    select length(stringu1) from tenk1 group by length(stringu1);
-create materialized view parallel_mat_view as
-    select length(stringu1) from tenk1 group by length(stringu1);
-create unique index on parallel_mat_view(length);
-refresh materialized view parallel_mat_view;
-refresh materialized view concurrently parallel_mat_view;
-drop materialized view parallel_mat_view;
-
-prepare prep_stmt as select length(stringu1) from tenk1 group by length(stringu1);
-explain (costs off) create table parallel_write as execute prep_stmt;
-create table parallel_write as execute prep_stmt;
-drop table parallel_write;
-
-rollback;
-- 
2.52.0

From af8b5211697c43ec5a10b5492d2ac5238b53ca83 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <[email protected]>
Date: Mon, 26 Jan 2026 00:15:30 +0100
Subject: [PATCH v2 1/5] pg_regress: Include diffs in output

Whenever pg_regress fails there's an indirection to actually get to the
failure reason. Locally it's possible to copy paste the filename and
open the file, but in CI it's necessary to manually traverse the
directory structure by clicking and scrolling a bunch of times.

This change starts printing the first 80 lines of the regression.diffs
file as TAP diagnostics. So it's not necessary to open the
regression.diffs file in many cases.
---
 src/test/regress/pg_regress.c | 52 +++++++++++++++++++++++++++++------
 1 file changed, 44 insertions(+), 8 deletions(-)

diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index b5c0cb647a8..5ce4d24d095 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -1414,6 +1414,7 @@ results_differ(const char *testname, const char *resultsfile, const char *defaul
 	int			best_line_count;
 	int			i;
 	int			l;
+	long		startpos;
 	const char *platform_expectfile;
 
 	/*
@@ -1521,22 +1522,57 @@ results_differ(const char *testname, const char *resultsfile, const char *defaul
 	 * append to the diffs summary file.
 	 */
 
-	/* Write diff header */
-	difffile = fopen(difffilename, "a");
+	difffile = fopen(difffilename, "a+");
 	if (difffile)
 	{
+		fseek(difffile, 0, SEEK_END);
+		startpos = ftell(difffile);
 		fprintf(difffile,
 				"diff %s %s %s\n",
 				pretty_diff_opts, best_expect_file, resultsfile);
+		fflush(difffile);
+
+		/* Run diff */
+		snprintf(cmd, sizeof(cmd),
+				 "diff %s \"%s\" \"%s\" >> \"%s\"",
+				 pretty_diff_opts, best_expect_file, resultsfile, difffilename);
+		run_diff(cmd, difffilename);
+
+		/* Emit diff as TAP diagnostics */
+		{
+			/*
+			 * In case of a crash the diff can be huge and all of the
+			 * subsequent tests will fail with essentially useless diffs too.
+			 * So to avoid flooding the output, while still providing useful
+			 * info in most cases we output only the first 80 lines of the
+			 * *combined* diff. The number 80 is chosen so that we output less
+			 * than 100 lines of diagnostics per pg_regress run. Otherwise if
+			 * meson is run with the --quiet flag only the last 100 lines are
+			 * shown and usually the most useful information is actually in
+			 * the first few lines
+			 */
+			static int	nlines = 0;
+			const int	max_diff_lines = 80;
+			char		line[1024];
+
+			fseek(difffile, startpos, SEEK_SET);
+			while (nlines <= max_diff_lines &&
+				   fgets(line, sizeof(line), difffile))
+			{
+				size_t		len = strlen(line);
+
+				if (len > 0 && line[len - 1] == '\n')
+					line[len - 1] = '\0';
+
+				if (++nlines > max_diff_lines)
+					diag("(diff output truncated and silencing output for further failing tests...)");
+				else
+					diag("%s", line);
+			}
+		}
 		fclose(difffile);
 	}
 
-	/* Run diff */
-	snprintf(cmd, sizeof(cmd),
-			 "diff %s \"%s\" \"%s\" >> \"%s\"",
-			 pretty_diff_opts, best_expect_file, resultsfile, difffilename);
-	run_diff(cmd, difffilename);
-
 	unlink(diff);
 	return true;
 }

base-commit: 851f6649cc18c4b482fa2b6afddb65b35d035370
-- 
2.52.0

From c17362a210bf0768571d069f90c2eea004159609 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <[email protected]>
Date: Mon, 26 Jan 2026 09:09:11 +0100
Subject: [PATCH v2 2/5] perl tap: Show failed command output

This adds the output of failed commands to the TAP output. Before a
failed libpq_pipeline test would look like this:

  Failed test 'libpq_pipeline cancel'
  at /home/jelte/work/postgres-3/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl line 55.

Now you can actually see the reason for the failure:

  Failed test 'libpq_pipeline cancel'
  at /home/jelte/work/postgres-3/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl line 55.
---------- command failed ----------
libpq_pipeline -r 700 cancel port=22067 host=/tmp/u1owq5Ajit dbname='postgres' max_protocol_version=latest
-------------- stderr --------------
test cancellations...
libpq_pipeline:315: unexpected number of rows received: 1
------------------------------------

To make sure the output is not flooded, only the first 30 and last 30
lines of both stderr and stdout are shown.

This also changes the 001_start_stop.pl test to configure a logfile
during pg_ctl restart. Otherwise the IPC::Run call will hang indefinitely,
because the stdout file descriptor won't be closed on process exit.
---
 src/bin/pg_ctl/t/001_start_stop.pl     |  2 +-
 src/test/perl/PostgreSQL/Test/Utils.pm | 52 +++++++++++++++++++++++---
 2 files changed, 47 insertions(+), 7 deletions(-)

diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl
index 9b79de319f2..4a25b35ed9c 100644
--- a/src/bin/pg_ctl/t/001_start_stop.pl
+++ b/src/bin/pg_ctl/t/001_start_stop.pl
@@ -112,7 +112,7 @@ SKIP:
 	ok(check_mode_recursive("$tempdir/data", 0750, 0640));
 }
 
-command_ok([ 'pg_ctl', 'restart', '--pgdata' => "$tempdir/data" ],
+command_ok([ 'pg_ctl', 'restart', '--pgdata' => "$tempdir/data", '--log' => $logFileName ],
 	'pg_ctl restart with server running');
 
 system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data";
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index ff843eecc6e..5c4586558bc 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -937,6 +937,36 @@ sub dir_symlink
 	die "No $newname" unless -e $newname;
 }
 
+# Log command output. Truncates to first/last 30 lines if over 60 lines.
+sub _diag_command_output
+{
+	my ($cmd, $stdout, $stderr) = @_;
+
+	diag(join(" ", @$cmd));
+
+	for my $channel (['stdout', $stdout], ['stderr', $stderr])
+	{
+		my ($name, $output) = @$channel;
+		next unless $output;
+
+		diag("-------------- $name --------------");
+		my @lines = split /\n/, $output;
+		if (@lines > 60)
+		{
+			diag(join("\n", @lines[0 .. 29]));
+			diag("... " . (@lines - 60) . " lines omitted ...");
+			diag(join("\n", @lines[-30 .. -1]));
+		}
+		else
+		{
+			diag($output);
+		}
+	}
+
+	diag("------------------------------------");
+}
+
+
 =pod
 
 =back
@@ -947,7 +977,7 @@ sub dir_symlink
 
 =item command_ok(cmd, test_name)
 
-Check that the command runs (via C<run_log>) successfully.
+Check that the command runs successfully.
 
 =cut
 
@@ -955,8 +985,13 @@ sub command_ok
 {
 	local $Test::Builder::Level = $Test::Builder::Level + 1;
 	my ($cmd, $test_name) = @_;
-	my $result = run_log($cmd);
-	ok($result, $test_name);
+	my ($stdout, $stderr);
+	my $result = IPC::Run::run $cmd, '>' => \$stdout, '2>' => \$stderr;
+	ok($result, $test_name) or do
+	{
+		diag("---------- command failed ----------");
+		_diag_command_output($cmd, $stdout, $stderr);
+	};
 	return;
 }
 
@@ -964,7 +999,7 @@ sub command_ok
 
 =item command_fails(cmd, test_name)
 
-Check that the command fails (when run via C<run_log>).
+Check that the command fails.
 
 =cut
 
@@ -972,8 +1007,13 @@ sub command_fails
 {
 	local $Test::Builder::Level = $Test::Builder::Level + 1;
 	my ($cmd, $test_name) = @_;
-	my $result = run_log($cmd);
-	ok(!$result, $test_name);
+	my ($stdout, $stderr);
+	my $result = IPC::Run::run $cmd, '>' => \$stdout, '2>' => \$stderr;
+	ok(!$result, $test_name) or do
+	{
+		diag("-- command succeeded unexpectedly --");
+		_diag_command_output($cmd, $stdout, $stderr);
+	};
 	return;
 }
 
-- 
2.52.0

From df8269d2b7cbae9d73bc1f046babd44c0c44ebea Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <[email protected]>
Date: Mon, 26 Jan 2026 09:32:15 +0100
Subject: [PATCH v2 3/5] perl tap: Show die reason in TAP output

In our Perl tests the most commonly used function is probably safe_psql.
But if that call failed you would get this totally useless output in the
meson output:

Tests were run but no plan was declared and done_testing() was not seen.
Looks like your test exited with 29 just after 21.

With this change you get the actual failure reason too:

die: error running SQL: 'psql:<stdin>:2: ERROR:  unterminated quoted string at or near "'"
LINE 1: GRANT ALL ON sysuser_data TO scram_role '
                                                ^'
while running 'psql --no-psqlrc --no-align --tuples-only --quiet --dbname port=17335 host=/tmp/y9KX6JADha dbname='postgres' --file - --variable ON_ERROR_STOP=1' with sql 'CREATE TABLE sysuser_data (n) AS SELECT NULL FROM generate_series(1, 10);
   GRANT ALL ON sysuser_data TO scram_role '' at /home/jelte/work/postgres-3/src/test/perl/PostgreSQL/Test/Cluster.pm line 2300.
Looks like your test exited with 29 just after 21.

Discussion: https://www.postgresql.org/message-id/20220222181924.eehi7o4pmneeb4hm%40alap3.anarazel.de
Discussion: https://www.postgresql.org/message-id/flat/[email protected]
---
 src/test/perl/PostgreSQL/Test/Utils.pm | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index 5c4586558bc..39f76c0aaba 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -244,6 +244,24 @@ INIT
 	autoflush STDOUT 1;
 	autoflush STDERR 1;
 	autoflush $testlog 1;
+
+	# Because of the above redirection the TAP output wouldn't contain
+	# information about tests failing due to die etc. Fix that by also
+	# printing the failure to the original stderr.
+	$SIG{__DIE__} = sub {
+		# Ignore dies because of syntax errors; those will be displayed
+		# correctly anyway.
+		return if !defined $^S;
+
+		# Ignore dies inside evals
+		return if $^S == 1;
+
+		diag("die: $_[0]");
+		# Also call done_testing() to avoid the confusing "no plan was declared"
+		# message in TAP output when a test dies.
+		done_testing();
+	};
+
 }
 
 END
-- 
2.52.0

From c79d91128ab3eb5c1f86fbdb83f2b3e56161ceb5 Mon Sep 17 00:00:00 2001
From: Jelte Fennema-Nio <[email protected]>
Date: Mon, 26 Jan 2026 10:04:44 +0100
Subject: [PATCH v2 4/5] perl tap: Use croak instead of die in our perl helper
 modules

Many of our helpers called die, but that means that the line number
reported on exit is the line number in Cluster.pm or Utils.pm. This
starts using croak everywhere (except for INIT) in our Cluster.pm and
Utils.pm files. That way the line number of the caller is reported
instead.

As an example, this is the new output for the safe_psql call that fails:

die: error running SQL: 'psql:<stdin>:2: ERROR:  unterminated quoted string at or near "'"
LINE 1: GRANT ALL ON sysuser_data TO scram_role '
                                                ^'
while running 'psql --no-psqlrc --no-align --tuples-only --quiet --dbname port=26578 host=/tmp/KaOjou8HJa dbname='postgres' --file - --variable ON_ERROR_STOP=1' with sql 'CREATE TABLE sysuser_data (n) AS SELECT NULL FROM generate_series(1, 10);
   GRANT ALL ON sysuser_data TO scram_role '' at /home/jelte/work/postgres-3/src/test/authentication/t/001_password.pl line 144.
Looks like your test exited with 29 just after 21.
---
 src/test/perl/PostgreSQL/Test/Cluster.pm | 108 +++++++++++------------
 src/test/perl/PostgreSQL/Test/Utils.pm   |  26 +++---
 2 files changed, 67 insertions(+), 67 deletions(-)

diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index e267ba868fe..b62aaa58cc4 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -57,7 +57,7 @@ PostgreSQL::Test::Cluster - class representing PostgreSQL server instance
   # run query every second until it returns 't'
   # or times out
   $node->poll_query_until('postgres', q|SELECT random() < 0.1;|')
-    or die "timed out";
+    or croak "timed out";
 
   # Do an online pg_basebackup
   my $ret = $node->backup('testbackup1');
@@ -339,7 +339,7 @@ sub raw_connect
 		$socket = IO::Socket::UNIX->new(
 			Type => SOCK_STREAM(),
 			Peer => $path,
-		) or die "Cannot create socket - $IO::Socket::errstr\n";
+		) or croak "Cannot create socket - $IO::Socket::errstr\n";
 	}
 	else
 	{
@@ -347,7 +347,7 @@ sub raw_connect
 			PeerHost => $pghost,
 			PeerPort => $pgport,
 			Proto => 'tcp'
-		) or die "Cannot create socket - $IO::Socket::errstr\n";
+		) or croak "Cannot create socket - $IO::Socket::errstr\n";
 	}
 	return $socket;
 }
@@ -406,7 +406,7 @@ sub group_access
 	my $dir_stat = stat($self->data_dir);
 
 	defined($dir_stat)
-	  or die('unable to stat ' . $self->data_dir);
+	  or croak('unable to stat ' . $self->data_dir);
 
 	return (S_IMODE($dir_stat->mode) == 0750);
 }
@@ -508,7 +508,7 @@ sub config_data
 	my $result =
 	  IPC::Run::run [ $self->installed_command('pg_config'), @options ],
 	  '>', \$stdout, '2>', \$stderr
-	  or die "could not execute pg_config";
+	  or croak "could not execute pg_config";
 	# standardize line endings
 	$stdout =~ s/\r(?=\n)//g;
 	# no options, scalar context: just hand back the output
@@ -542,7 +542,7 @@ sub info
 {
 	my ($self) = @_;
 	my $_info = '';
-	open my $fh, '>', \$_info or die;
+	open my $fh, '>', \$_info or croak;
 	print $fh "Name: " . $self->name . "\n";
 	print $fh "Version: " . $self->{_pg_version} . "\n"
 	  if $self->{_pg_version};
@@ -553,7 +553,7 @@ sub info
 	print $fh "Log file: " . $self->logfile . "\n";
 	print $fh "Install Path: ", $self->{_install_path} . "\n"
 	  if $self->{_install_path};
-	close $fh or die;
+	close $fh or croak;
 	return $_info;
 }
 
@@ -583,7 +583,7 @@ sub set_replication_conf
 	$self->host eq $test_pghost
 	  or croak "set_replication_conf only works with the default host";
 
-	open my $hba, '>>', "$pgdata/pg_hba.conf" or die $!;
+	open my $hba, '>>', "$pgdata/pg_hba.conf" or croak $!;
 	print $hba
 	  "\n# Allow replication (set up by PostgreSQL::Test::Cluster.pm)\n";
 	if ($PostgreSQL::Test::Utils::windows_os
@@ -707,7 +707,7 @@ sub init
 	PostgreSQL::Test::Utils::system_or_bail($ENV{PG_REGRESS},
 		'--config-auth', $pgdata, @{ $params{auth_extra} });
 
-	open my $conf, '>>', "$pgdata/postgresql.conf" or die $!;
+	open my $conf, '>>', "$pgdata/postgresql.conf" or croak $!;
 	print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n";
 	print $conf "fsync = off\n";
 	print $conf "restart_after_crash = off\n";
@@ -764,7 +764,7 @@ sub init
 	close $conf;
 
 	chmod($self->group_access ? 0640 : 0600, "$pgdata/postgresql.conf")
-	  or die("unable to set permissions for $pgdata/postgresql.conf");
+	  or croak("unable to set permissions for $pgdata/postgresql.conf");
 
 	$self->set_replication_conf if $params{allows_streaming};
 	$self->enable_archiving if $params{has_archiving};
@@ -793,7 +793,7 @@ sub append_conf
 	PostgreSQL::Test::Utils::append_to_file($conffile, $str . "\n");
 
 	chmod($self->group_access() ? 0640 : 0600, $conffile)
-	  or die("unable to set permissions for $conffile");
+	  or croak("unable to set permissions for $conffile");
 
 	return;
 }
@@ -839,7 +839,7 @@ sub adjust_conf
 	close $fh;
 
 	chmod($self->group_access() ? 0640 : 0600, $conffile)
-	  or die("unable to set permissions for $conffile");
+	  or croak("unable to set permissions for $conffile");
 }
 
 =pod
@@ -995,7 +995,7 @@ sub init_from_backup
 	}
 	elsif (defined $params{tar_program})
 	{
-		mkdir($data_path) || die "mkdir $data_path: $!";
+		mkdir($data_path) || croak "mkdir $data_path: $!";
 		PostgreSQL::Test::Utils::system_or_bail(
 			$params{tar_program},
 			'xf' => $backup_path . '/base.tar',
@@ -1007,7 +1007,7 @@ sub init_from_backup
 
 		# We need to generate a tablespace_map file.
 		open(my $tsmap, ">", "$data_path/tablespace_map")
-		  || die "$data_path/tablespace_map: $!";
+		  || croak "$data_path/tablespace_map: $!";
 
 		# Extract tarfiles and add tablespace_map entries
 		my @tstars = grep { /^\d+.tar/ }
@@ -1017,12 +1017,12 @@ sub init_from_backup
 			my $tsoid = $tstar;
 			$tsoid =~ s/\.tar$//;
 
-			die "no tablespace mapping for $tstar"
+			croak "no tablespace mapping for $tstar"
 			  if !exists $params{tablespace_map}
 			  || !exists $params{tablespace_map}{$tsoid};
 			my $newdir = $params{tablespace_map}{$tsoid};
 
-			mkdir($newdir) || die "mkdir $newdir: $!";
+			mkdir($newdir) || croak "mkdir $newdir: $!";
 			PostgreSQL::Test::Utils::system_or_bail(
 				$params{tar_program},
 				'xf' => $backup_path . '/' . $tstar,
@@ -1061,12 +1061,12 @@ sub init_from_backup
 		{
 			# We need to generate a tablespace_map file.
 			open(my $tsmap, ">", "$data_path/tablespace_map")
-			  || die "$data_path/tablespace_map: $!";
+			  || croak "$data_path/tablespace_map: $!";
 
 			# Now use the list of tablespace links to copy each tablespace.
 			for my $tsoid (@tsoids)
 			{
-				die "no tablespace mapping for $tsoid"
+				croak "no tablespace mapping for $tsoid"
 				  if !exists $params{tablespace_map}
 				  || !exists $params{tablespace_map}{$tsoid};
 
@@ -1083,7 +1083,7 @@ sub init_from_backup
 			close($tsmap);
 		}
 	}
-	chmod(0700, $data_path) or die $!;
+	chmod(0700, $data_path) or croak $!;
 
 	# Base configuration for this node
 	$self->append_conf(
@@ -1906,7 +1906,7 @@ sub can_bind
 	my $paddr = sockaddr_in($port, $iaddr);
 
 	socket(SOCK, PF_INET, SOCK_STREAM, 0)
-	  or die "socket failed: $!";
+	  or croak "socket failed: $!";
 
 	# As in postmaster, don't use SO_REUSEADDR on Windows
 	setsockopt(SOCK, SOL_SOCKET, SO_REUSEADDR, pack("l", 1))
@@ -1924,9 +1924,9 @@ sub _reserve_port
 	# open in rw mode so we don't have to reopen it and lose the lock
 	my $filename = "$portdir/$port.rsv";
 	sysopen(my $portfile, $filename, O_RDWR | O_CREAT)
-	  || die "opening port file $filename: $!";
+	  || croak "opening port file $filename: $!";
 	# take an exclusive lock to avoid concurrent access
-	flock($portfile, LOCK_EX) || die "locking port file $filename: $!";
+	flock($portfile, LOCK_EX) || croak "locking port file $filename: $!";
 	# see if someone else has or had a reservation of this port
 	my $pid = <$portfile> || "0";
 	chomp $pid;
@@ -1935,16 +1935,16 @@ sub _reserve_port
 		if (kill 0, $pid)
 		{
 			# process exists and is owned by us, so we can't reserve this port
-			flock($portfile, LOCK_UN) || die $!;
+			flock($portfile, LOCK_UN) || croak $!;
 			close($portfile);
 			return 0;
 		}
 	}
 	# All good, go ahead and reserve the port
-	seek($portfile, 0, SEEK_SET) || die $!;
+	seek($portfile, 0, SEEK_SET) || croak $!;
 	# print the pid with a fixed width so we don't leave any trailing junk
 	print $portfile sprintf("%10d\n", $$);
-	flock($portfile, LOCK_UN) || die $!;
+	flock($portfile, LOCK_UN) || croak $!;
 	close($portfile);
 	push(@port_reservation_files, $filename);
 	return 1;
@@ -2246,13 +2246,13 @@ sub psql
 
 			# IPC::Run::run threw an exception. re-throw unless it's a
 			# timeout, which we'll handle by testing is_expired
-			die $exc_save
+			croak $exc_save
 			  if (blessed($exc_save)
 				|| $exc_save !~ /^\Q$timeout_exception\E/);
 
 			$ret = undef;
 
-			die "Got timeout exception '$exc_save' but timer not expired?!"
+			croak "Got timeout exception '$exc_save' but timer not expired?!"
 			  unless $timeout->is_expired;
 
 			if (defined($params{timed_out}))
@@ -2261,7 +2261,7 @@ sub psql
 			}
 			else
 			{
-				die "psql timed out: stderr: '$$stderr'\n"
+				croak "psql timed out: stderr: '$$stderr'\n"
 				  . "while running '@psql_params'";
 			}
 		}
@@ -2284,7 +2284,7 @@ sub psql
 	if (defined $ret)
 	{
 		my $core = $ret & 128 ? " (core dumped)" : "";
-		die "psql exited with signal "
+		croak "psql exited with signal "
 		  . ($ret & 127)
 		  . "$core: '$$stderr' while running '@psql_params'"
 		  if $ret & 127;
@@ -2293,14 +2293,14 @@ sub psql
 
 	if ($ret && $params{on_error_die})
 	{
-		die "psql error: stderr: '$$stderr'\nwhile running '@psql_params'"
+		croak "psql error: stderr: '$$stderr'\nwhile running '@psql_params'"
 		  if $ret == 1;
-		die "connection error: '$$stderr'\nwhile running '@psql_params'"
+		croak "connection error: '$$stderr'\nwhile running '@psql_params'"
 		  if $ret == 2;
-		die
+		croak
 		  "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
 		  if $ret == 3;
-		die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
+		croak "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
 	}
 
 	if (wantarray)
@@ -2501,7 +2501,7 @@ sub _pgbench_make_files
 			if (-e $filename)
 			{
 				ok(0, "$filename must not already exist");
-				unlink $filename or die "cannot unlink $filename: $!";
+				unlink $filename or croak "cannot unlink $filename: $!";
 			}
 			PostgreSQL::Test::Utils::append_to_file($filename, $$files{$fn});
 		}
@@ -3123,8 +3123,8 @@ sub write_wal
 	my $path =
 	  sprintf("%s/pg_wal/%08X%08X%08X", $self->data_dir, $tli, 0, $segment);
 
-	open my $fh, "+<:raw", $path or die "could not open WAL segment $path";
-	seek($fh, $offset, SEEK_SET) or die "could not seek WAL segment $path";
+	open my $fh, "+<:raw", $path or croak "could not open WAL segment $path";
+	seek($fh, $offset, SEEK_SET) or croak "could not seek WAL segment $path";
 	print $fh $data;
 	close $fh;
 
@@ -3288,7 +3288,7 @@ sub wait_for_event
 		SELECT count(*) > 0 FROM pg_stat_activity
 		WHERE backend_type = '$backend_type' AND wait_event = '$wait_event_name'
 	])
-	  or die
+	  or croak
 	  qq(timed out when waiting for $backend_type to reach wait event '$wait_event_name');
 
 	return;
@@ -3325,7 +3325,7 @@ poll_query_until timeout.
 
 Requires that the 'postgres' db exists and is accessible.
 
-This is not a test. It die()s on failure.
+This is not a test. It croak()s on failure.
 
 =cut
 
@@ -3409,7 +3409,7 @@ The replication connection must be in a streaming state.
 
 Requires that the 'postgres' db exists and is accessible.
 
-This is not a test. It die()s on failure.
+This is not a test. It croak()s on failure.
 
 =cut
 
@@ -3429,7 +3429,7 @@ be 'restart' or 'confirmed_flush'.
 
 Requires that the 'postgres' db exists and is accessible.
 
-This is not a test. It die()s on failure.
+This is not a test. It croak()s on failure.
 
 If the slot is not active, will time out after poll_query_until's timeout.
 
@@ -3484,7 +3484,7 @@ creating a new subscription.
 If there is no active replication connection from this peer, wait until
 poll_query_until timeout.
 
-This is not a test. It die()s on failure.
+This is not a test. It croak()s on failure.
 
 =cut
 
@@ -3624,7 +3624,7 @@ Disallows pg_recvlogical from internally retrying on error by passing --no-loop.
 
 Plugin options are passed as additional keyword arguments.
 
-If called in scalar context, returns stdout, and die()s on timeout or nonzero return.
+If called in scalar context, returns stdout, and croak()s on timeout or nonzero return.
 
 If called in array context, returns a tuple of (retval, stdout, stderr, timeout).
 timeout is the IPC::Run::Timeout object whose is_expired method can be tested
@@ -3680,15 +3680,15 @@ sub pg_recvlogical_upto
 
 			# IPC::Run::run threw an exception. re-throw unless it's a
 			# timeout, which we'll handle by testing is_expired
-			die $exc_save
+			croak $exc_save
 			  if (blessed($exc_save) || $exc_save !~ qr/$timeout_exception/);
 
 			$ret = undef;
 
-			die "Got timeout exception '$exc_save' but timer not expired?!"
+			croak "Got timeout exception '$exc_save' but timer not expired?!"
 			  unless $timeout->is_expired;
 
-			die
+			croak
 			  "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
 			  unless wantarray;
 		}
@@ -3700,7 +3700,7 @@ sub pg_recvlogical_upto
 	}
 	else
 	{
-		die
+		croak
 		  "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'"
 		  if $ret;
 		return $stdout;
@@ -3725,14 +3725,14 @@ sub corrupt_page_checksum
 	my $pgdata = $self->data_dir;
 	my $pageheader;
 
-	open my $fh, '+<', "$pgdata/$file" or die "open($file) failed: $!";
+	open my $fh, '+<', "$pgdata/$file" or croak "open($file) failed: $!";
 	binmode $fh;
-	sysseek($fh, $page_offset, 0) or die "sysseek failed: $!";
-	sysread($fh, $pageheader, 24) or die "sysread failed: $!";
+	sysseek($fh, $page_offset, 0) or croak "sysseek failed: $!";
+	sysread($fh, $pageheader, 24) or croak "sysread failed: $!";
 	# This inverts the pd_checksum field (only); see struct PageHeaderData
 	$pageheader ^= "\0\0\0\0\0\0\0\0\xff\xff";
-	sysseek($fh, $page_offset, 0) or die "sysseek failed: $!";
-	syswrite($fh, $pageheader) or die "syswrite failed: $!";
+	sysseek($fh, $page_offset, 0) or croak "sysseek failed: $!";
+	syswrite($fh, $pageheader) or croak "syswrite failed: $!";
 	close $fh;
 
 	return;
@@ -3760,7 +3760,7 @@ sub log_standby_snapshot
 		SELECT restart_lsn IS NOT NULL
 		FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'
 	])
-	  or die
+	  or croak
 	  "timed out waiting for logical slot to calculate its restart_lsn";
 
 	# Then arrange for the xl_running_xacts record for which the standby is
@@ -3802,7 +3802,7 @@ sub create_logical_slot_on_standby
 
 	is($self->slot($slot_name)->{'slot_type'},
 		'logical', $slot_name . ' on standby created')
-	  or die "could not create slot" . $slot_name;
+	  or croak "could not create slot" . $slot_name;
 }
 
 =pod
@@ -3833,7 +3833,7 @@ sub validate_slot_inactive_since
 		),
 		't',
 		"last inactive time for slot $slot_name is valid on node $name")
-	  or die "could not validate captured inactive_since for slot $slot_name";
+	  or croak "could not validate captured inactive_since for slot $slot_name";
 
 	return $inactive_since;
 }
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index 39f76c0aaba..949bd219a8d 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -636,7 +636,7 @@ sub read_head_tail
 
 	return ([], []) if $line_count <= 0;
 
-	open my $fh, '<', $filename or die "couldn't open file: $filename\n";
+	open my $fh, '<', $filename or croak "couldn't open file: $filename\n";
 	my @lines = <$fh>;
 	close $fh;
 
@@ -701,7 +701,7 @@ sub check_mode_recursive
 					}
 					else
 					{
-						die $msg;
+						croak $msg;
 					}
 				}
 
@@ -740,7 +740,7 @@ sub check_mode_recursive
 				# Else something we can't handle
 				else
 				{
-					die "unknown file type for $File::Find::name";
+					croak "unknown file type for $File::Find::name";
 				}
 			}
 		},
@@ -772,7 +772,7 @@ sub chmod_recursive
 					chmod(
 						S_ISDIR($file_stat->mode) ? $dir_mode : $file_mode,
 						$File::Find::name
-					) or die "unable to chmod $File::Find::name";
+					) or croak "unable to chmod $File::Find::name";
 				}
 			}
 		},
@@ -798,11 +798,11 @@ sub scan_server_header
 	my $result = IPC::Run::run [ 'pg_config', '--includedir-server' ],
 	  '>' => \$stdout,
 	  '2>' => \$stderr
-	  or die "could not execute pg_config";
+	  or croak "could not execute pg_config";
 	chomp($stdout);
 	$stdout =~ s/\r$//;
 
-	open my $header_h, '<', "$stdout/$header_path" or die "$!";
+	open my $header_h, '<', "$stdout/$header_path" or croak "$!";
 
 	my @match = undef;
 	while (<$header_h>)
@@ -816,7 +816,7 @@ sub scan_server_header
 	}
 
 	close $header_h;
-	die "could not find match in header $header_path\n"
+	croak "could not find match in header $header_path\n"
 	  unless @match;
 	return @match;
 }
@@ -837,11 +837,11 @@ sub check_pg_config
 	my $result = IPC::Run::run [ 'pg_config', '--includedir' ],
 	  '>' => \$stdout,
 	  '2>' => \$stderr
-	  or die "could not execute pg_config";
+	  or croak "could not execute pg_config";
 	chomp($stdout);
 	$stdout =~ s/\r$//;
 
-	open my $pg_config_h, '<', "$stdout/pg_config.h" or die "$!";
+	open my $pg_config_h, '<', "$stdout/pg_config.h" or croak "$!";
 	my $match = (grep { /^$regexp/ } <$pg_config_h>);
 	close $pg_config_h;
 	return $match;
@@ -946,13 +946,13 @@ sub dir_symlink
 			# need some indirection on msys
 			$cmd = qq{echo '$cmd' | \$COMSPEC /Q};
 		}
-		system($cmd) == 0 or die;
+		system($cmd) == 0 or croak;
 	}
 	else
 	{
-		symlink $oldname, $newname or die $!;
+		symlink $oldname, $newname or croak $!;
 	}
-	die "No $newname" unless -e $newname;
+	croak "No $newname" unless -e $newname;
 }
 
 # Log command output. Truncates to first/last 30 lines if over 60 lines.
@@ -1274,7 +1274,7 @@ sub command_checks_all
 
 	# See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR
 	my $ret = $?;
-	die "command exited with signal " . ($ret & 127)
+	croak "command exited with signal " . ($ret & 127)
 	  if $ret & 127;
 	$ret = $ret >> 8;
 
-- 
2.52.0

Reply via email to