Hi Hackers,

In some recent-ish commits (ce1b0f9d, fd4c4ede and cc2c9fa6) we
converted a lot of the TAP tests to use long command line options and
fat commas (=>) to separate command line options from their arguments,
so that perltidy doesn't break lines between them.

However, those patches were nowhere near complete, so here's a follow-up
to fix all the cases I could find.  While I was there I failed to resist
the urge to do some other minor tidy-ups that I think make things
neater, but that perltidy has no opinion on. I also eliminated some variables
that were only initialised and then used once.

The second patch might be more controversial: it eliminates unnecessary
quoting from hash keys, both inside curly braces and before fat commas
(except where a hash has a mix of keys that need quoting and not).

- ilmari

>From c16e4fa67455ec6c7534a73d03c96a73530beeef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dagfinn=20Ilmari=20Manns=C3=A5ker?= <ilm...@ilmari.org>
Date: Tue, 28 Jan 2025 14:07:46 +0000
Subject: [PATCH 1/2] TAP tests: use more fat commas and long options

Also eliminate some pointless used-once variables
---
 contrib/auto_explain/t/001_auto_explain.pl    |   2 +-
 contrib/basebackup_to_shell/t/001_basic.pl    |   6 +-
 contrib/sepgsql/t/001_sepgsql.pl              |   3 +-
 .../t/010_pg_archivecleanup.pl                |   9 +-
 src/bin/pg_dump/t/010_dump_connstr.pl         |  14 +-
 src/bin/pg_resetwal/t/002_corrupted.pl        |   3 +-
 src/bin/pg_rewind/t/009_growing_files.pl      |   3 +-
 src/bin/pg_rewind/t/RewindTest.pm             |  21 ++-
 src/bin/pg_upgrade/t/002_pg_upgrade.pl        |  74 ++++----
 src/bin/pg_upgrade/t/003_logical_slots.pl     |  14 +-
 src/bin/pg_upgrade/t/005_char_signedness.pl   |  37 ++--
 src/bin/pg_verifybackup/t/002_algorithm.pl    |  13 +-
 src/bin/pg_verifybackup/t/003_corruption.pl   |   4 +-
 src/bin/pg_verifybackup/t/004_options.pl      |   3 +-
 src/bin/pg_verifybackup/t/008_untar.pl        |  20 ++-
 src/bin/pg_verifybackup/t/009_extract.pl      |  25 +--
 src/bin/pg_verifybackup/t/010_client_untar.pl |  17 +-
 src/bin/pg_waldump/t/001_basic.pl             |  24 +--
 src/bin/pgbench/t/001_pgbench_with_server.pl  |   2 +-
 src/bin/pgbench/t/002_pgbench_no_server.pl    |   2 +-
 src/bin/psql/t/001_basic.pl                   |   5 +-
 src/bin/psql/t/020_cancel.pl                  |   9 +-
 src/interfaces/libpq/t/001_uri.pl             |   5 +-
 .../libpq/t/005_negotiate_encryption.pl       |   4 +-
 src/test/modules/commit_ts/t/003_standby_2.pl |   2 +-
 .../libpq_pipeline/t/001_libpq_pipeline.pl    |   4 +-
 .../ssl_passphrase_callback/t/001_testfunc.pl |   8 +-
 src/test/modules/test_pg_dump/t/001_base.pl   | 159 +++++++++++-------
 .../perl/PostgreSQL/Test/BackgroundPsql.pm    |   9 +-
 src/test/perl/PostgreSQL/Test/Cluster.pm      | 140 +++++++++------
 src/test/perl/PostgreSQL/Test/Kerberos.pm     |   2 +-
 src/test/perl/PostgreSQL/Test/Utils.pm        |  40 +++--
 .../postmaster/t/002_connection_limits.pl     |   5 +-
 src/test/recovery/t/001_stream_rep.pl         |  48 +++---
 src/test/recovery/t/006_logical_decoding.pl   |   7 +-
 src/test/recovery/t/013_crash_restart.pl      |  29 ++--
 src/test/recovery/t/017_shm.pl                |  18 +-
 src/test/recovery/t/021_row_visibility.pl     |  30 ++--
 src/test/recovery/t/022_crash_temp_files.pl   |  30 ++--
 src/test/recovery/t/032_relfilenode_reuse.pl  |  30 ++--
 .../t/035_standby_logical_decoding.pl         |  36 ++--
 .../t/040_standby_failover_slots_sync.pl      |   2 +-
 .../recovery/t/041_checkpoint_at_promote.pl   |  15 +-
 .../pg_bsd_indent/t/001_pg_bsd_indent.pl      |   2 +-
 44 files changed, 530 insertions(+), 405 deletions(-)

diff --git a/contrib/auto_explain/t/001_auto_explain.pl b/contrib/auto_explain/t/001_auto_explain.pl
index 6e514db35d1..80c0c19af58 100644
--- a/contrib/auto_explain/t/001_auto_explain.pl
+++ b/contrib/auto_explain/t/001_auto_explain.pl
@@ -28,7 +28,7 @@ sub query_log
 }
 
 my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init('auth_extra' => [ '--create-role', 'regress_user1' ]);
+$node->init('auth_extra' => [ '--create-role' => 'regress_user1' ]);
 $node->append_conf('postgresql.conf',
 	"session_preload_libraries = 'auto_explain'");
 $node->append_conf('postgresql.conf', "auto_explain.log_min_duration = 0");
diff --git a/contrib/basebackup_to_shell/t/001_basic.pl b/contrib/basebackup_to_shell/t/001_basic.pl
index 6ffc89433a9..3ee4603bd3a 100644
--- a/contrib/basebackup_to_shell/t/001_basic.pl
+++ b/contrib/basebackup_to_shell/t/001_basic.pl
@@ -131,8 +131,10 @@ sub verify_backup
 
 		# Untar.
 		my $extract_path = PostgreSQL::Test::Utils::tempdir;
-		system_or_bail($tar, 'xf', $backup_dir . '/' . $prefix . 'base.tar',
-			'-C', $extract_path);
+		system_or_bail(
+			$tar,
+			'xf' => $backup_dir . '/' . $prefix . 'base.tar',
+			'-C' => $extract_path);
 
 		# Verify.
 		$node->command_ok(
diff --git a/contrib/sepgsql/t/001_sepgsql.pl b/contrib/sepgsql/t/001_sepgsql.pl
index c5fd7254841..cd00e4963db 100644
--- a/contrib/sepgsql/t/001_sepgsql.pl
+++ b/contrib/sepgsql/t/001_sepgsql.pl
@@ -216,8 +216,7 @@ $node->append_conf('postgresql.conf', 'log_statement=none');
 			'-D' => $node->data_dir,
 			'template0'
 		],
-		'<',
-		$ENV{share_contrib_dir} . '/sepgsql.sql');
+		'<' => $ENV{share_contrib_dir} . '/sepgsql.sql');
 	ok($result, 'sepgsql installation script');
 }
 
diff --git a/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl b/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl
index 4103fec7f7a..c6148cda7fc 100644
--- a/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl
+++ b/src/bin/pg_archivecleanup/t/010_pg_archivecleanup.pl
@@ -85,10 +85,11 @@ command_fails_like(
 
 	my $stderr;
 	my $oldestkeptwalfile = '00000001000000370000000E';
-	my $result =
-	  IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
-		$oldestkeptwalfile ],
-	  '2>', \$stderr;
+	my $result = IPC::Run::run [
+		'pg_archivecleanup', '--debug', '--dry-run', $tempdir,
+		$oldestkeptwalfile
+	  ],
+	  '2>' => \$stderr;
 	ok($result, "pg_archivecleanup dry run: exit code 0");
 
 	for my $walpair (@walfiles_verbose)
diff --git a/src/bin/pg_dump/t/010_dump_connstr.pl b/src/bin/pg_dump/t/010_dump_connstr.pl
index bde6096c60d..e3d3a71f14a 100644
--- a/src/bin/pg_dump/t/010_dump_connstr.pl
+++ b/src/bin/pg_dump/t/010_dump_connstr.pl
@@ -244,9 +244,10 @@ $envar_node->init(
 		'--locale' => 'C',
 		'--encoding' => 'LATIN1',
 	],
-	auth_extra =>
-	  [ '--user' => $dst_bootstrap_super, '--create-role' => $restore_super ],
-);
+	auth_extra => [
+		'--user' => $dst_bootstrap_super,
+		'--create-role' => $restore_super,
+	]);
 $envar_node->start;
 
 # make superuser for restore
@@ -280,9 +281,10 @@ $cmdline_node->init(
 		'--locale' => 'C',
 		'--encoding' => 'LATIN1',
 	],
-	auth_extra =>
-	  [ '--user' => $dst_bootstrap_super, '--create-role' => $restore_super ],
-);
+	auth_extra => [
+		'--user' => $dst_bootstrap_super,
+		'--create-role' => $restore_super,
+	]);
 $cmdline_node->start;
 $cmdline_node->run_log(
 	[
diff --git a/src/bin/pg_resetwal/t/002_corrupted.pl b/src/bin/pg_resetwal/t/002_corrupted.pl
index 869d5d8d2a6..3c80f6309c9 100644
--- a/src/bin/pg_resetwal/t/002_corrupted.pl
+++ b/src/bin/pg_resetwal/t/002_corrupted.pl
@@ -60,7 +60,8 @@ command_fails_like(
 	[ 'pg_resetwal', $node->data_dir ],
 	qr/not proceeding because control file values were guessed/,
 	'does not run when control file values were guessed');
-command_ok([ 'pg_resetwal', '-f', $node->data_dir ],
+command_ok(
+	[ 'pg_resetwal', '--force', $node->data_dir ],
 	'runs with force when control file values were guessed');
 
 done_testing();
diff --git a/src/bin/pg_rewind/t/009_growing_files.pl b/src/bin/pg_rewind/t/009_growing_files.pl
index 643d200dcc9..afe68c8bf0d 100644
--- a/src/bin/pg_rewind/t/009_growing_files.pl
+++ b/src/bin/pg_rewind/t/009_growing_files.pl
@@ -56,8 +56,7 @@ my $ret = run_log(
 		'--target-pgdata' => $primary_pgdata,
 		'--no-sync',
 	],
-	'2>>',
-	"$standby_pgdata/tst_both_dir/file1");
+	'2>>' => "$standby_pgdata/tst_both_dir/file1");
 ok(!$ret, 'Error out on copying growing file');
 
 # Ensure that the files are of different size, the final error message should
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index ec3b4a51995..3efab831797 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -69,8 +69,9 @@ sub primary_psql
 	my $cmd = shift;
 	my $dbname = shift || 'postgres';
 
-	system_or_bail 'psql', '-q', '--no-psqlrc', '-d',
-	  $node_primary->connstr($dbname), '-c', "$cmd";
+	system_or_bail 'psql', '--quiet', '--no-psqlrc',
+	  '--dbname' => $node_primary->connstr($dbname),
+	  '--command' => $cmd;
 	return;
 }
 
@@ -79,8 +80,9 @@ sub standby_psql
 	my $cmd = shift;
 	my $dbname = shift || 'postgres';
 
-	system_or_bail 'psql', '-q', '--no-psqlrc', '-d',
-	  $node_standby->connstr($dbname), '-c', "$cmd";
+	system_or_bail 'psql', '--quiet', '--no-psqlrc',
+	  '--dbname' => $node_standby->connstr($dbname),
+	  '--command' => $cmd;
 	return;
 }
 
@@ -95,11 +97,12 @@ sub check_query
 
 	# we want just the output, no formatting
 	my $result = run [
-		'psql', '-q', '-A', '-t', '--no-psqlrc', '-d',
-		$node_primary->connstr('postgres'),
-		'-c', $query
+		'psql', '--quiet', '--no-align', '--tuples-only', '--no-psqlrc',
+		'--dbname' => $node_primary->connstr('postgres'),
+		'--command' => $query
 	  ],
-	  '>', \$stdout, '2>', \$stderr;
+	  '>' => \$stdout,
+	  '2>' => \$stderr;
 
 	is($result, 1, "$test_name: psql exit code");
 	is($stderr, '', "$test_name: psql no stderr");
@@ -124,7 +127,7 @@ sub setup_cluster
 	$node_primary->init(
 		allows_streaming => 1,
 		extra => $extra,
-		auth_extra => [ '--create-role', 'rewind_user' ]);
+		auth_extra => [ '--create-role' => 'rewind_user' ]);
 
 	# Set wal_keep_size to prevent WAL segment recycling after enforced
 	# checkpoints in the tests.
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index c00cf68d660..00051b85035 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -208,7 +208,8 @@ if (defined($ENV{olddump}))
 
 	# Load the dump using the "postgres" database as "regression" does
 	# not exist yet, and we are done here.
-	$oldnode->command_ok([ 'psql', '-X', '-f', $olddumpfile, 'postgres' ],
+	$oldnode->command_ok(
+		[ 'psql', '--no-psqlrc', '--file' => $olddumpfile, 'postgres' ],
 		'loaded old dump file');
 }
 else
@@ -303,14 +304,15 @@ if (defined($ENV{oldinstall}))
 		my @command_args = ();
 		for my $upcmd (@{ $adjust_cmds->{$updb} })
 		{
-			push @command_args, '-c', $upcmd;
+			push @command_args, '--command' => $upcmd;
 		}
 
 		# For simplicity, use the newer version's psql to issue the commands.
 		$newnode->command_ok(
 			[
-				'psql', '-X', '-v', 'ON_ERROR_STOP=1',
-				'-d', $oldnode->connstr($updb),
+				'psql', '--no-psqlrc',
+				'--set' => 'ON_ERROR_STOP=1',
+				'--dbname' => $oldnode->connstr($updb),
 				@command_args,
 			],
 			"ran version adaptation commands for database $updb");
@@ -324,8 +326,9 @@ $oldnode->restart;
 # Take a dump before performing the upgrade as a base comparison. Note
 # that we need to use pg_dumpall from the new node here.
 my @dump_command = (
-	'pg_dumpall', '--no-sync', '-d', $oldnode->connstr('postgres'),
-	'-f', $dump1_file);
+	'pg_dumpall', '--no-sync',
+	'--dbname' => $oldnode->connstr('postgres'),
+	'--file' => $dump1_file);
 # --extra-float-digits is needed when upgrading from a version older than 11.
 push(@dump_command, '--extra-float-digits', '0')
   if ($oldnode->pg_version < 12);
@@ -399,13 +402,13 @@ $oldnode->stop;
 command_checks_all(
 	[
 		'pg_upgrade', '--no-sync',
-		'-d', $oldnode->data_dir,
-		'-D', $newnode->data_dir,
-		'-b', $oldbindir . '/does/not/exist/',
-		'-B', $newbindir,
-		'-s', $newnode->host,
-		'-p', $oldnode->port,
-		'-P', $newnode->port,
+		'--old-datadir' => $oldnode->data_dir,
+		'--new-datadir' => $newnode->data_dir,
+		'--old-bindir' => $oldbindir . '/does/not/exist/',
+		'--new-bindir' => $newbindir,
+		'--socketdir' => $newnode->host,
+		'--old-port' => $oldnode->port,
+		'--new-port' => $newnode->port,
 		$mode, '--check',
 	],
 	1,
@@ -427,13 +430,13 @@ SKIP:
 	command_checks_all(
 		[
 			'pg_upgrade', '--no-sync',
-			'-d', $oldnode->data_dir,
-			'-D', $newnode->data_dir,
-			'-b', $oldbindir,
-			'-B', $newbindir,
-			'-s', $newnode->host,
-			'-p', $oldnode->port,
-			'-P', $newnode->port,
+			'--old-datadir' => $oldnode->data_dir,
+			'--new-datadir' => $newnode->data_dir,
+			'--old-bindir' => $oldbindir,
+			'--new-bindir' => $newbindir,
+			'--socketdir' => $newnode->host,
+			'--old-port' => $oldnode->port,
+			'--new-port' => $newnode->port,
 			$mode, '--check',
 		],
 		1,
@@ -451,10 +454,14 @@ $oldnode->stop;
 # --check command works here, cleans up pg_upgrade_output.d.
 command_ok(
 	[
-		'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
-		'-D', $newnode->data_dir, '-b', $oldbindir,
-		'-B', $newbindir, '-s', $newnode->host,
-		'-p', $oldnode->port, '-P', $newnode->port,
+		'pg_upgrade', '--no-sync',
+		'--old-datadir' => $oldnode->data_dir,
+		'--new-datadir' => $newnode->data_dir,
+		'--old-bindir' => $oldbindir,
+		'--new-bindir' => $newbindir,
+		'--socketdir' => $newnode->host,
+		'--old-port' => $oldnode->port,
+		'--new-port' => $newnode->port,
 		$mode, '--check',
 	],
 	'run of pg_upgrade --check for new instance');
@@ -464,10 +471,14 @@ ok(!-d $newnode->data_dir . "/pg_upgrade_output.d",
 # Actual run, pg_upgrade_output.d is removed at the end.
 command_ok(
 	[
-		'pg_upgrade', '--no-sync', '-d', $oldnode->data_dir,
-		'-D', $newnode->data_dir, '-b', $oldbindir,
-		'-B', $newbindir, '-s', $newnode->host,
-		'-p', $oldnode->port, '-P', $newnode->port,
+		'pg_upgrade', '--no-sync',
+		'--old-datadir' => $oldnode->data_dir,
+		'--new-datadir' => $newnode->data_dir,
+		'--old-bindir' => $oldbindir,
+		'--new-bindir' => $newbindir,
+		'--socketdir' => $newnode->host,
+		'--old-port' => $oldnode->port,
+		'--new-port' => $newnode->port,
 		$mode,
 	],
 	'run of pg_upgrade for new instance');
@@ -512,10 +523,11 @@ is( $result,
 
 # Second dump from the upgraded instance.
 @dump_command = (
-	'pg_dumpall', '--no-sync', '-d', $newnode->connstr('postgres'),
-	'-f', $dump2_file);
+	'pg_dumpall', '--no-sync',
+	'--dbname' => $newnode->connstr('postgres'),
+	'--file' => $dump2_file);
 # --extra-float-digits is needed when upgrading from a version older than 11.
-push(@dump_command, '--extra-float-digits', '0')
+push(@dump_command, '--extra-float-digits' => '0')
   if ($oldnode->pg_version < 12);
 $newnode->command_ok(\@dump_command, 'dump after running pg_upgrade');
 
diff --git a/src/bin/pg_upgrade/t/003_logical_slots.pl b/src/bin/pg_upgrade/t/003_logical_slots.pl
index 785353ce4d5..75db4911730 100644
--- a/src/bin/pg_upgrade/t/003_logical_slots.pl
+++ b/src/bin/pg_upgrade/t/003_logical_slots.pl
@@ -40,13 +40,13 @@ checkpoint_timeout = 1h
 # Setup a common pg_upgrade command to be used by all the test cases
 my @pg_upgrade_cmd = (
 	'pg_upgrade', '--no-sync',
-	'-d', $oldpub->data_dir,
-	'-D', $newpub->data_dir,
-	'-b', $oldpub->config_data('--bindir'),
-	'-B', $newpub->config_data('--bindir'),
-	'-s', $newpub->host,
-	'-p', $oldpub->port,
-	'-P', $newpub->port,
+	'--old-datadir' => $oldpub->data_dir,
+	'--new-datadir' => $newpub->data_dir,
+	'--old-bindir' => $oldpub->config_data('--bindir'),
+	'--new-bindir' => $newpub->config_data('--bindir'),
+	'--socketdir' => $newpub->host,
+	'--old-port' => $oldpub->port,
+	'--new-port' => $newpub->port,
 	$mode);
 
 # In a VPATH build, we'll be started in the source directory, but we want
diff --git a/src/bin/pg_upgrade/t/005_char_signedness.pl b/src/bin/pg_upgrade/t/005_char_signedness.pl
index d186822ac77..0190747758c 100644
--- a/src/bin/pg_upgrade/t/005_char_signedness.pl
+++ b/src/bin/pg_upgrade/t/005_char_signedness.pl
@@ -31,7 +31,12 @@ command_like(
 
 # Set the old cluster's default char signedness to unsigned for test.
 command_ok(
-	[ 'pg_resetwal', '--char-signedness', 'unsigned', '-f', $old->data_dir ],
+	[
+		'pg_resetwal',
+		'--char-signedness' => 'unsigned',
+		'--force',
+		$old->data_dir
+	],
 	"set old cluster's default char signedness to unsigned");
 
 # Check if the value is successfully updated.
@@ -44,14 +49,14 @@ command_like(
 command_checks_all(
 	[
 		'pg_upgrade', '--no-sync',
-		'-d', $old->data_dir,
-		'-D', $new->data_dir,
-		'-b', $old->config_data('--bindir'),
-		'-B', $new->config_data('--bindir'),
-		'-s', $new->host,
-		'-p', $old->port,
-		'-P', $new->port,
-		'--set-char-signedness', 'signed',
+		'--old-datadir' => $old->data_dir,
+		'--new-datadir' => $new->data_dir,
+		'--old-bindir' => $old->config_data('--bindir'),
+		'--new-bindir' => $new->config_data('--bindir'),
+		'--socketdir' => $new->host,
+		'--old-port' => $old->port,
+		'--new-port' => $new->port,
+		'--set-char-signedness' => 'signed',
 		$mode
 	],
 	1,
@@ -64,13 +69,13 @@ command_checks_all(
 command_ok(
 	[
 		'pg_upgrade', '--no-sync',
-		'-d', $old->data_dir,
-		'-D', $new->data_dir,
-		'-b', $old->config_data('--bindir'),
-		'-B', $new->config_data('--bindir'),
-		'-s', $new->host,
-		'-p', $old->port,
-		'-P', $new->port,
+		'--old-datadir' => $old->data_dir,
+		'--new-datadir' => $new->data_dir,
+		'--old-bindir' => $old->config_data('--bindir'),
+		'--new-bindir' => $new->config_data('--bindir'),
+		'--socketdir' => $new->host,
+		'--old-port' => $old->port,
+		'--new-port' => $new->port,
 		$mode
 	],
 	'run of pg_upgrade');
diff --git a/src/bin/pg_verifybackup/t/002_algorithm.pl b/src/bin/pg_verifybackup/t/002_algorithm.pl
index 71aaa8d881f..ae16c11bc4d 100644
--- a/src/bin/pg_verifybackup/t/002_algorithm.pl
+++ b/src/bin/pg_verifybackup/t/002_algorithm.pl
@@ -19,18 +19,21 @@ sub test_checksums
 	my ($format, $algorithm) = @_;
 	my $backup_path = $primary->backup_dir . '/' . $format . '/' . $algorithm;
 	my @backup = (
-		'pg_basebackup', '-D', $backup_path,
-		'--manifest-checksums', $algorithm, '--no-sync', '-cfast');
-	my @verify = ('pg_verifybackup', '-e', $backup_path);
+		'pg_basebackup',
+		'--pgdata' => $backup_path,
+		'--manifest-checksums' => $algorithm,
+		'--no-sync',
+		'--checkpoint' => 'fast');
+	my @verify = ('pg_verifybackup', '--exit-on-error', $backup_path);
 
 	if ($format eq 'tar')
 	{
 		# Add switch to get a tar-format backup
-		push @backup, ('-F', 't');
+		push @backup, ('--format' => 'tar');
 
 		# Add switch to skip WAL verification, which is not yet supported for
 		# tar-format backups
-		push @verify, ('-n');
+		push @verify, ('--no-parse-wal');
 	}
 
 	# A backup with a bogus algorithm should fail.
diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl
index 8ef7f8a4e7a..84f23b8bc3d 100644
--- a/src/bin/pg_verifybackup/t/003_corruption.pl
+++ b/src/bin/pg_verifybackup/t/003_corruption.pl
@@ -190,13 +190,13 @@ for my $scenario (@scenario)
 
 			# Construct base.tar with what's left.
 			chdir($backup_path) || die "chdir: $!";
-			command_ok([ $tar, '-cf', "$tar_backup_path/base.tar", '.' ]);
+			command_ok([ $tar, '-cf' => "$tar_backup_path/base.tar", '.' ]);
 			chdir($cwd) || die "chdir: $!";
 
 			# Now check that the backup no longer verifies. We must use -n
 			# here, because pg_waldump can't yet read WAL from a tarfile.
 			command_fails_like(
-				[ 'pg_verifybackup', '-n', $tar_backup_path ],
+				[ 'pg_verifybackup', '--no-parse-wal', $tar_backup_path ],
 				$scenario->{'fails_like'},
 				"corrupt backup fails verification: $name");
 
diff --git a/src/bin/pg_verifybackup/t/004_options.pl b/src/bin/pg_verifybackup/t/004_options.pl
index 52660786680..8c83dc7189e 100644
--- a/src/bin/pg_verifybackup/t/004_options.pl
+++ b/src/bin/pg_verifybackup/t/004_options.pl
@@ -119,7 +119,8 @@ command_like(
 
 # Verify that when --ignore is not used, both problems are reported.
 $result = IPC::Run::run [ 'pg_verifybackup', $backup_path ],
-  '>', \$stdout, '2>', \$stderr;
+  '>' => \$stdout,
+  '2>' => \$stderr;
 ok(!$result, "multiple problems: fails");
 like(
 	$stderr,
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index 590c497503c..deed3ec247d 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -79,12 +79,14 @@ for my $tc (@test_configuration)
 			|| $tc->{'decompress_program'} eq '');
 
 		# Take a server-side backup.
-		my @backup = (
-			'pg_basebackup', '--no-sync',
-			'-cfast', '--target',
-			"server:$backup_path", '-Xfetch');
-		push @backup, @{ $tc->{'backup_flags'} };
-		$primary->command_ok(\@backup,
+		$primary->command_ok(
+			[
+				'pg_basebackup', '--no-sync',
+				'--checkpoint' => 'fast',
+				'--target' => "server:$backup_path",
+				'--wal-method' => 'fetch',
+				@{ $tc->{'backup_flags'} },
+			],
 			"server side backup, compression $method");
 
 
@@ -97,7 +99,11 @@ for my $tc (@test_configuration)
 			"found expected backup files, compression $method");
 
 		# Verify tar backup.
-		$primary->command_ok([ 'pg_verifybackup', '-n', '-e', $backup_path ],
+		$primary->command_ok(
+			[
+				'pg_verifybackup', '--no-parse-wal',
+				'--exit-on-error', $backup_path,
+			],
 			"verify backup, compression $method");
 
 		# Cleanup.
diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl
index ae2b5a9b36d..25605291217 100644
--- a/src/bin/pg_verifybackup/t/009_extract.pl
+++ b/src/bin/pg_verifybackup/t/009_extract.pl
@@ -53,19 +53,21 @@ for my $tc (@test_configuration)
 		skip "$method compression not supported by this build", 2
 		  if !$tc->{'enabled'};
 
-		# Take backup with server compression enabled.
-		my @backup = (
-			'pg_basebackup', '-D', $backup_path,
-			'-Xfetch', '--no-sync', '-cfast', '-Fp');
-		push @backup, @{ $tc->{'backup_flags'} };
-
-		my @verify = ('pg_verifybackup', '-e', $backup_path);
-
 		# A backup with a valid compression method should work.
 		my $backup_stdout = '';
 		my $backup_stderr = '';
-		my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
-			'2>', \$backup_stderr);
+		my $backup_result = $primary->run_log(
+			[
+				'pg_basebackup',
+				'--pgdata' => $backup_path,
+				'--wal-method' => 'fetch',
+				'--no-sync',
+				'--checkpoint' => 'fast',
+				'--format' => 'plain',
+				@{ $tc->{'backup_flags'} },
+			],
+			'>' => \$backup_stdout,
+			'2>' => \$backup_stderr);
 		if ($backup_stdout ne '')
 		{
 			print "# standard output was:\n$backup_stdout";
@@ -86,7 +88,8 @@ for my $tc (@test_configuration)
 		}
 
 		# Make sure that it verifies OK.
-		$primary->command_ok(\@verify,
+		$primary->command_ok(
+			[ 'pg_verifybackup', '--exit-on-error', $backup_path ],
 			"backup verified, compression method \"$method\"");
 	}
 
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index 4559c5c75e8..d8d2b06c7ee 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -72,14 +72,19 @@ for my $tc (@test_configuration)
 			|| $tc->{'decompress_program'} eq '');
 
 		# Take a client-side backup.
-		my @backup = (
-			'pg_basebackup', '-D', $backup_path,
-			'-Xfetch', '--no-sync', '-cfast', '-Ft');
-		push @backup, @{ $tc->{'backup_flags'} };
 		my $backup_stdout = '';
 		my $backup_stderr = '';
-		my $backup_result = $primary->run_log(\@backup, '>', \$backup_stdout,
-			'2>', \$backup_stderr);
+		my $backup_result = $primary->run_log(
+			[
+				'pg_basebackup', '--no-sync',
+				'--pgdata' => $backup_path,
+				'--wal-method' => 'fetch',
+				'--checkpoint' => 'fast',
+				'--format' => 'tar',
+				@{ $tc->{'backup_flags'} }
+			],
+			'>' => \$backup_stdout,
+			'2>' => \$backup_stderr);
 		if ($backup_stdout ne '')
 		{
 			print "# standard output was:\n$backup_stdout";
diff --git a/src/bin/pg_waldump/t/001_basic.pl b/src/bin/pg_waldump/t/001_basic.pl
index 5c8fea275bb..f26d75e01cf 100644
--- a/src/bin/pg_waldump/t/001_basic.pl
+++ b/src/bin/pg_waldump/t/001_basic.pl
@@ -247,13 +247,15 @@ command_fails_like(
 	$lsn2++;
 	my $new_start = sprintf("%s/%X", $part1, $lsn2);
 
-	my (@cmd, $stdout, $stderr, $result);
+	my ($stdout, $stderr);
 
-	@cmd = (
+	my $result = IPC::Run::run [
 		'pg_waldump',
 		'--start' => $new_start,
-		$node->data_dir . '/pg_wal/' . $start_walfile);
-	$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
+		$node->data_dir . '/pg_wal/' . $start_walfile
+	  ],
+	  '>' => \$stdout,
+	  '2>' => \$stderr;
 	ok($result, "runs with start segment and start LSN specified");
 	like($stderr, qr/first record is after/, 'info message printed');
 }
@@ -266,18 +268,20 @@ sub test_pg_waldump
 	local $Test::Builder::Level = $Test::Builder::Level + 1;
 	my @opts = @_;
 
-	my (@cmd, $stdout, $stderr, $result, @lines);
+	my ($stdout, $stderr);
 
-	@cmd = (
+	my $result = IPC::Run::run [
 		'pg_waldump',
 		'--path' => $node->data_dir,
 		'--start' => $start_lsn,
-		'--end' => $end_lsn);
-	push @cmd, @opts;
-	$result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr;
+		'--end' => $end_lsn,
+		@opts
+	  ],
+	  '>' => \$stdout,
+	  '2>' => \$stderr;
 	ok($result, "pg_waldump @opts: runs ok");
 	is($stderr, '', "pg_waldump @opts: no stderr");
-	@lines = split /\n/, $stdout;
+	my @lines = split /\n/, $stdout;
 	ok(@lines > 0, "pg_waldump @opts: some lines are output");
 	return @lines;
 }
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 8816af17ac1..7dd78940300 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -213,7 +213,7 @@ my $nthreads = 2;
 
 {
 	my ($stderr);
-	run_log([ 'pgbench', '--jobs' => '2', '--bad-option' ], '2>', \$stderr);
+	run_log([ 'pgbench', '--jobs' => '2', '--bad-option' ], '2>' => \$stderr);
 	$nthreads = 1 if $stderr =~ m/threads are not supported on this platform/;
 }
 
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index ea6677f90e1..f975c73dd75 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -50,7 +50,7 @@ sub pgbench_scripts
 			# cleanup from prior runs
 			unlink $filename;
 			append_to_file($filename, $$files{$fn});
-			push @cmd, '-f', $filename;
+			push @cmd, '--file' => $filename;
 		}
 	}
 	command_checks_all(\@cmd, $stat, $out, $err, $name);
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 3170bc86856..dca34ac975a 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -52,8 +52,9 @@ foreach my $arg (qw(commands variables))
 	my ($stdout, $stderr);
 	my $result;
 
-	$result = IPC::Run::run [ 'psql', "--help=$arg" ], '>', \$stdout, '2>',
-	  \$stderr;
+	$result = IPC::Run::run [ 'psql', "--help=$arg" ],
+	  '>' => \$stdout,
+	  '2>' => \$stderr;
 	ok($result, "psql --help=$arg exit code 0");
 	isnt($stdout, '', "psql --help=$arg goes to stdout");
 	is($stderr, '', "psql --help=$arg nothing to stderr");
diff --git a/src/bin/psql/t/020_cancel.pl b/src/bin/psql/t/020_cancel.pl
index a40f86293f2..154a24bca98 100644
--- a/src/bin/psql/t/020_cancel.pl
+++ b/src/bin/psql/t/020_cancel.pl
@@ -22,8 +22,13 @@ $node->start;
 local %ENV = $node->_get_env();
 
 my ($stdin, $stdout, $stderr);
-my $h = IPC::Run::start([ 'psql', '-X', '-v', 'ON_ERROR_STOP=1' ],
-	\$stdin, \$stdout, \$stderr);
+my $h = IPC::Run::start(
+	[
+		'psql', '--no-psqlrc', '--set' => 'ON_ERROR_STOP=1',
+	],
+	'<' => \$stdin,
+	'>' => \$stdout,
+	'2>' => \$stderr);
 
 # Send sleep command and wait until the server has registered it
 $stdin = "select pg_sleep($PostgreSQL::Test::Utils::timeout_default);\n";
diff --git a/src/interfaces/libpq/t/001_uri.pl b/src/interfaces/libpq/t/001_uri.pl
index ee4944ed18f..b0edcb3be88 100644
--- a/src/interfaces/libpq/t/001_uri.pl
+++ b/src/interfaces/libpq/t/001_uri.pl
@@ -268,8 +268,9 @@ sub test_uri
 	%ENV = (%ENV, %envvars);
 
 	my $cmd = [ 'libpq_uri_regress', $uri ];
-	$result{exit} = IPC::Run::run $cmd, '>', \$result{stdout}, '2>',
-	  \$result{stderr};
+	$result{exit} = IPC::Run::run $cmd,
+	  '>' => \$result{stdout},
+	  '2>' => \$result{stderr};
 
 	chomp($result{stdout});
 	chomp($result{stderr});
diff --git a/src/interfaces/libpq/t/005_negotiate_encryption.pl b/src/interfaces/libpq/t/005_negotiate_encryption.pl
index c834fa5149a..f6a453c1b41 100644
--- a/src/interfaces/libpq/t/005_negotiate_encryption.pl
+++ b/src/interfaces/libpq/t/005_negotiate_encryption.pl
@@ -673,7 +673,9 @@ sub connect_test
 	my ($ret, $stdout, $stderr) = $node->psql(
 		'postgres',
 		'',
-		extra_params => [ '-w', '-c', 'SELECT current_enc()' ],
+		extra_params => [
+			'--no-password', '--command' => 'SELECT current_enc()',
+		],
 		connstr => "$connstr_full",
 		on_error_stop => 0);
 
diff --git a/src/test/modules/commit_ts/t/003_standby_2.pl b/src/test/modules/commit_ts/t/003_standby_2.pl
index 69ffd969e10..3b27da6b237 100644
--- a/src/test/modules/commit_ts/t/003_standby_2.pl
+++ b/src/test/modules/commit_ts/t/003_standby_2.pl
@@ -57,7 +57,7 @@ $primary->restart;
 $primary->append_conf('postgresql.conf', 'track_commit_timestamp = off');
 $primary->restart;
 
-system_or_bail('pg_ctl', '-D', $standby->data_dir, 'promote');
+system_or_bail('pg_ctl', '--pgdata' => $standby->data_dir, 'promote');
 
 $standby->safe_psql('postgres', "create table t11()");
 my $standby_ts = $standby->safe_psql('postgres',
diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
index e301c378264..9691b850418 100644
--- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
+++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
@@ -35,7 +35,7 @@ mkdir "$PostgreSQL::Test::Utils::tmp_check/traces";
 
 for my $testname (@tests)
 {
-	my @extraargs = ('-r', $numrows);
+	my @extraargs = ('-r' => $numrows);
 	my $cmptrace = grep(/^$testname$/,
 		qw(simple_pipeline nosync multi_pipelines prepared singlerow
 		  pipeline_abort pipeline_idle transaction
@@ -46,7 +46,7 @@ for my $testname (@tests)
 	  "$PostgreSQL::Test::Utils::tmp_check/traces/$testname.trace";
 	if ($cmptrace)
 	{
-		push @extraargs, "-t", $traceout;
+		push @extraargs, "-t" => $traceout;
 	}
 
 	# Execute the test
diff --git a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
index 891e7f63e04..c16ff257180 100644
--- a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
+++ b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
@@ -62,9 +62,11 @@ like(
 $node->append_conf('postgresql.conf', "ssl_passphrase.passphrase = 'blurfl'");
 
 # try to start the server again
-my $ret =
-  PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $node->data_dir, '-l',
-	$node->logfile, 'start');
+my $ret = PostgreSQL::Test::Utils::system_log(
+	'pg_ctl',
+	'--pgdata' => $node->data_dir,
+	'--log' => $node->logfile,
+	'start');
 
 
 # with a bad passphrase the server should not start
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index 9b2a90b0469..3c3c6db3512 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -47,131 +47,149 @@ my %pgdump_runs = (
 	binary_upgrade => {
 		dump_cmd => [
 			'pg_dump', '--no-sync',
-			"--file=$tempdir/binary_upgrade.sql", '--schema-only',
-			'--binary-upgrade', '--dbname=postgres',
+			'--file' => "$tempdir/binary_upgrade.sql",
+			'--schema-only', '--binary-upgrade',
+			'--dbname' => 'postgres',
 		],
 	},
 	clean => {
 		dump_cmd => [
-			'pg_dump', "--file=$tempdir/clean.sql",
-			'-c', '--no-sync',
-			'--dbname=postgres',
+			'pg_dump', '--no-sync',
+			'--file' => "$tempdir/clean.sql",
+			'--clean',
+			'--dbname' => 'postgres',
 		],
 	},
 	clean_if_exists => {
 		dump_cmd => [
-			'pg_dump',
-			'--no-sync',
-			"--file=$tempdir/clean_if_exists.sql",
-			'-c',
+			'pg_dump', '--no-sync',
+			'--file' => "$tempdir/clean_if_exists.sql",
+			'--clean',
 			'--if-exists',
-			'--encoding=UTF8',    # no-op, just tests that option is accepted
+			'--encoding' => 'UTF8',    # no-op, just tests that it's accepted
 			'postgres',
 		],
 	},
 	createdb => {
 		dump_cmd => [
-			'pg_dump',
-			'--no-sync',
-			"--file=$tempdir/createdb.sql",
-			'-C',
-			'-R',                 # no-op, just for testing
+			'pg_dump', '--no-sync',
+			'--file' => "$tempdir/createdb.sql",
+			'--create',
+			'--no-reconnect',          # no-op, just for testing
 			'postgres',
 		],
 	},
 	data_only => {
 		dump_cmd => [
-			'pg_dump',
-			'--no-sync',
-			"--file=$tempdir/data_only.sql",
-			'-a',
-			'-v',                 # no-op, just make sure it works
+			'pg_dump', '--no-sync',
+			'--file' => "$tempdir/data_only.sql",
+			'--data-only',
+			'--verbose',               # no-op, just make sure it works
 			'postgres',
 		],
 	},
 	defaults => {
-		dump_cmd => [ 'pg_dump', '-f', "$tempdir/defaults.sql", 'postgres', ],
+		dump_cmd => [
+			'pg_dump',
+			'--file' => "$tempdir/defaults.sql",
+			'postgres',
+		],
 	},
 	defaults_custom_format => {
 		test_key => 'defaults',
 		compile_option => 'gzip',
 		dump_cmd => [
-			'pg_dump', '--no-sync', '-Fc', '-Z6',
-			"--file=$tempdir/defaults_custom_format.dump", 'postgres',
+			'pg_dump', '--no-sync',
+			'--format' => 'custom',
+			'--compress' => 6,
+			'--file' => "$tempdir/defaults_custom_format.dump",
+			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
-			"--file=$tempdir/defaults_custom_format.sql",
+			'--file' => "$tempdir/defaults_custom_format.sql",
 			"$tempdir/defaults_custom_format.dump",
 		],
 	},
 	defaults_dir_format => {
 		test_key => 'defaults',
 		dump_cmd => [
-			'pg_dump', '--no-sync', '-Fd',
-			"--file=$tempdir/defaults_dir_format", 'postgres',
+			'pg_dump', '--no-sync',
+			'--format' => 'directory',
+			'--file' => "$tempdir/defaults_dir_format",
+			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
-			"--file=$tempdir/defaults_dir_format.sql",
+			'--file' => "$tempdir/defaults_dir_format.sql",
 			"$tempdir/defaults_dir_format",
 		],
 	},
 	defaults_parallel => {
 		test_key => 'defaults',
 		dump_cmd => [
-			'pg_dump', '--no-sync', '-Fd', '-j2',
-			"--file=$tempdir/defaults_parallel", 'postgres',
+			'pg_dump', '--no-sync',
+			'--format' => 'directory',
+			'--jobs' => 2,
+			'--file' => "$tempdir/defaults_parallel",
+			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
-			"--file=$tempdir/defaults_parallel.sql",
+			'--file' => "$tempdir/defaults_parallel.sql",
 			"$tempdir/defaults_parallel",
 		],
 	},
 	defaults_tar_format => {
 		test_key => 'defaults',
 		dump_cmd => [
-			'pg_dump', '--no-sync', '-Ft',
-			"--file=$tempdir/defaults_tar_format.tar", 'postgres',
+			'pg_dump', '--no-sync',
+			'--format' => 'tar',
+			'--file' => "$tempdir/defaults_tar_format.tar",
+			'postgres',
 		],
 		restore_cmd => [
 			'pg_restore',
-			"--file=$tempdir/defaults_tar_format.sql",
+			'--file' => "$tempdir/defaults_tar_format.sql",
 			"$tempdir/defaults_tar_format.tar",
 		],
 	},
 	exclude_table => {
 		dump_cmd => [
 			'pg_dump',
-			'--exclude-table=regress_table_dumpable',
-			"--file=$tempdir/exclude_table.sql",
+			'--exclude-table' => 'regress_table_dumpable',
+			'--file' => "$tempdir/exclude_table.sql",
 			'postgres',
 		],
 	},
 	extension_schema => {
 		dump_cmd => [
-			'pg_dump', '--schema=public',
-			"--file=$tempdir/extension_schema.sql", 'postgres',
+			'pg_dump', '--no-sync',
+			'--schema' => 'public',
+			'--file' => "$tempdir/extension_schema.sql",
+			'postgres',
 		],
 	},
 	pg_dumpall_globals => {
 		dump_cmd => [
 			'pg_dumpall', '--no-sync',
-			"--file=$tempdir/pg_dumpall_globals.sql", '-g',
+			'--file' => "$tempdir/pg_dumpall_globals.sql",
+			'--globals-only',
 		],
 	},
 	no_privs => {
 		dump_cmd => [
 			'pg_dump', '--no-sync',
-			"--file=$tempdir/no_privs.sql", '-x',
+			'--file' => "$tempdir/no_privs.sql",
+			'--no-privileges',
 			'postgres',
 		],
 	},
 	no_owner => {
 		dump_cmd => [
 			'pg_dump', '--no-sync',
-			"--file=$tempdir/no_owner.sql", '-O',
+			'--file' => "$tempdir/no_owner.sql",
+			'--no-owner',
 			'postgres',
 		],
 	},
@@ -181,59 +199,68 @@ my %pgdump_runs = (
 	privileged_internals => {
 		dump_cmd => [
 			'pg_dump', '--no-sync',
-			"--file=$tempdir/privileged_internals.sql",
+			'--file' => "$tempdir/privileged_internals.sql",
 			# these two tables are irrelevant to the test case
-			'--exclude-table=regress_pg_dump_schema.external_tab',
-			'--exclude-table=regress_pg_dump_schema.extdependtab',
-			'--username=regress_dump_login_role', 'postgres',
+			'--exclude-table' => 'regress_pg_dump_schema.external_tab',
+			'--exclude-table' => 'regress_pg_dump_schema.extdependtab',
+			'--username' => 'regress_dump_login_role',
+			'postgres',
 		],
 	},
 
 	schema_only => {
 		dump_cmd => [
-			'pg_dump', '--no-sync', "--file=$tempdir/schema_only.sql",
-			'-s', 'postgres',
+			'pg_dump', '--no-sync',
+			'--file' => "$tempdir/schema_only.sql",
+			'--schema-only', 'postgres',
 		],
 	},
 	section_pre_data => {
 		dump_cmd => [
 			'pg_dump', '--no-sync',
-			"--file=$tempdir/section_pre_data.sql", '--section=pre-data',
+			'--file' => "$tempdir/section_pre_data.sql",
+			'--section' => 'pre-data',
 			'postgres',
 		],
 	},
 	section_data => {
 		dump_cmd => [
 			'pg_dump', '--no-sync',
-			"--file=$tempdir/section_data.sql", '--section=data',
+			'--file' => "$tempdir/section_data.sql",
+			'--section' => 'data',
 			'postgres',
 		],
 	},
 	section_post_data => {
 		dump_cmd => [
-			'pg_dump', '--no-sync', "--file=$tempdir/section_post_data.sql",
-			'--section=post-data', 'postgres',
+			'pg_dump', '--no-sync',
+			'--file' => "$tempdir/section_post_data.sql",
+			'--section' => 'post-data',
+			'postgres',
 		],
 	},
 	with_extension => {
 		dump_cmd => [
-			'pg_dump', '--no-sync', "--file=$tempdir/with_extension.sql",
-			'--extension=test_pg_dump', 'postgres',
+			'pg_dump', '--no-sync',
+			'--file' => "$tempdir/with_extension.sql",
+			'--extension' => 'test_pg_dump',
+			'postgres',
 		],
 	},
 	exclude_extension => {
 		dump_cmd => [
 			'pg_dump', '--no-sync',
-			"--file=$tempdir/exclude_extension.sql",
-			'--exclude-extension=test_pg_dump', 'postgres',
+			'--file' => "$tempdir/exclude_extension.sql",
+			'--exclude-extension' => 'test_pg_dump',
+			'postgres',
 		],
 	},
 	exclude_extension_filter => {
 		dump_cmd => [
 			'pg_dump',
 			'--no-sync',
-			"--file=$tempdir/exclude_extension_filter.sql",
-			"--filter=$tempdir/exclude_extension_filter.txt",
+			'--file' => "$tempdir/exclude_extension_filter.sql",
+			'--filter' => "$tempdir/exclude_extension_filter.txt",
 			'postgres',
 		],
 	},
@@ -241,8 +268,10 @@ my %pgdump_runs = (
 	# plpgsql in the list blocks the dump of extension test_pg_dump
 	without_extension => {
 		dump_cmd => [
-			'pg_dump', '--no-sync', "--file=$tempdir/without_extension.sql",
-			'--extension=plpgsql', 'postgres',
+			'pg_dump', '--no-sync',
+			'--file' => "$tempdir/without_extension.sql",
+			'--extension' => 'plpgsql',
+			'postgres',
 		],
 	},
 
@@ -253,9 +282,9 @@ my %pgdump_runs = (
 		dump_cmd => [
 			'pg_dump',
 			'--no-sync',
-			"--file=$tempdir/without_extension_explicit_schema.sql",
-			'--extension=plpgsql',
-			'--schema=public',
+			'--file' => "$tempdir/without_extension_explicit_schema.sql",
+			'--extension' => 'plpgsql',
+			'--schema' => 'public',
 			'postgres',
 		],
 	},
@@ -267,9 +296,9 @@ my %pgdump_runs = (
 		dump_cmd => [
 			'pg_dump',
 			'--no-sync',
-			"--file=$tempdir/without_extension_internal_schema.sql",
-			'--extension=plpgsql',
-			'--schema=regress_pg_dump_schema',
+			'--file' => "$tempdir/without_extension_internal_schema.sql",
+			'--extension' => 'plpgsql',
+			'--schema' => 'regress_pg_dump_schema',
 			'postgres',
 		],
 	},);
@@ -840,7 +869,7 @@ my %tests = (
 # Create a PG instance to test actually dumping from
 
 my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init('auth_extra' => [ '--create-role', 'regress_dump_login_role' ]);
+$node->init('auth_extra' => [ '--create-role' => 'regress_dump_login_role' ]);
 $node->start;
 
 my $port = $node->port;
diff --git a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
index c611a61cf4e..60bbd5dd445 100644
--- a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
+++ b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
@@ -108,14 +108,17 @@ sub new
 	if ($interactive)
 	{
 		$run = IPC::Run::start $psql_params,
-		  '<pty<', \$psql->{stdin}, '>pty>', \$psql->{stdout}, '2>',
-		  \$psql->{stderr},
+		  '<pty<' => \$psql->{stdin},
+		  '>pty>' => \$psql->{stdout},
+		  '2>' => \$psql->{stderr},
 		  $psql->{timeout};
 	}
 	else
 	{
 		$run = IPC::Run::start $psql_params,
-		  '<', \$psql->{stdin}, '>', \$psql->{stdout}, '2>', \$psql->{stderr},
+		  '<' => \$psql->{stdin},
+		  '>' => \$psql->{stdout},
+		  '2>' => \$psql->{stderr},
 		  $psql->{timeout};
 	}
 
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index b105cba05a6..0750915a9a8 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -640,8 +640,11 @@ sub init
 		or !defined $ENV{INITDB_TEMPLATE})
 	{
 		note("initializing database system by running initdb");
-		PostgreSQL::Test::Utils::system_or_bail('initdb', '-D', $pgdata, '-A',
-			'trust', '-N', @{ $params{extra} });
+		PostgreSQL::Test::Utils::system_or_bail(
+			'initdb', '--no-sync',
+			'--pgdata' => $pgdata,
+			'--auth' => 'trust',
+			@{ $params{extra} });
 	}
 	else
 	{
@@ -838,11 +841,11 @@ sub backup
 
 	print "# Taking pg_basebackup $backup_name from node \"$name\"\n";
 	PostgreSQL::Test::Utils::system_or_bail(
-		'pg_basebackup', '-D',
-		$backup_path, '-h',
-		$self->host, '-p',
-		$self->port, '--checkpoint',
-		'fast', '--no-sync',
+		'pg_basebackup', '--no-sync',
+		'--pgdata' => $backup_path,
+		'--host' => $self->host,
+		'--port' => $self->port,
+		'--checkpoint' => 'fast',
 		@{ $params{backup_options} });
 	print "# Backup finished\n";
 	return;
@@ -946,7 +949,7 @@ sub init_from_backup
 		}
 
 		local %ENV = $self->_get_env();
-		my @combineargs = ('pg_combinebackup', '-d');
+		my @combineargs = ('pg_combinebackup', '--debug');
 		if (exists $params{tablespace_map})
 		{
 			while (my ($olddir, $newdir) = each %{ $params{tablespace_map} })
@@ -959,19 +962,21 @@ sub init_from_backup
 		{
 			push @combineargs, $params{combine_mode};
 		}
-		push @combineargs, @prior_backup_path, $backup_path, '-o', $data_path;
+		push @combineargs, @prior_backup_path, $backup_path,
+		  '--output' => $data_path;
 		PostgreSQL::Test::Utils::system_or_bail(@combineargs);
 	}
 	elsif (defined $params{tar_program})
 	{
 		mkdir($data_path) || die "mkdir $data_path: $!";
-		PostgreSQL::Test::Utils::system_or_bail($params{tar_program}, 'xf',
-			$backup_path . '/base.tar',
-			'-C', $data_path);
 		PostgreSQL::Test::Utils::system_or_bail(
-			$params{tar_program}, 'xf',
-			$backup_path . '/pg_wal.tar', '-C',
-			$data_path . '/pg_wal');
+			$params{tar_program},
+			'xf' => $backup_path . '/base.tar',
+			'-C' => $data_path);
+		PostgreSQL::Test::Utils::system_or_bail(
+			$params{tar_program},
+			'xf' => $backup_path . '/pg_wal.tar',
+			'-C' => $data_path . '/pg_wal');
 
 		# We need to generate a tablespace_map file.
 		open(my $tsmap, ">", "$data_path/tablespace_map")
@@ -991,9 +996,10 @@ sub init_from_backup
 			my $newdir = $params{tablespace_map}{$tsoid};
 
 			mkdir($newdir) || die "mkdir $newdir: $!";
-			PostgreSQL::Test::Utils::system_or_bail($params{tar_program},
-				'xf', $backup_path . '/' . $tstar,
-				'-C', $newdir);
+			PostgreSQL::Test::Utils::system_or_bail(
+				$params{tar_program},
+				'xf' => $backup_path . '/' . $tstar,
+				'-C' => $newdir);
 
 			my $escaped_newdir = $newdir;
 			$escaped_newdir =~ s/\\/\\\\/g;
@@ -1133,8 +1139,10 @@ sub start
 	# -w is now the default but having it here does no harm and helps
 	# compatibility with older versions.
 	$ret = PostgreSQL::Test::Utils::system_log(
-		'pg_ctl', '-w', '-D', $self->data_dir,
-		'-l', $self->logfile, '-o', "--cluster-name=$name",
+		'pg_ctl', '--wait',
+		'--pgdata' => $self->data_dir,
+		'--log' => $self->logfile,
+		'--options' => "--cluster-name=$name",
 		'start');
 
 	if ($ret != 0)
@@ -1211,10 +1219,10 @@ sub stop
 	return 1 unless defined $self->{_pid};
 
 	print "### Stopping node \"$name\" using mode $mode\n";
-	my @cmd = ('pg_ctl', '-D', $pgdata, '-m', $mode, 'stop');
+	my @cmd = ('pg_ctl', '--pgdata' => $pgdata, '--mode' => $mode, 'stop');
 	if ($params{timeout})
 	{
-		push(@cmd, ('--timeout', $params{timeout}));
+		push(@cmd, ('--timeout' => $params{timeout}));
 	}
 	$ret = PostgreSQL::Test::Utils::system_log(@cmd);
 
@@ -1251,7 +1259,9 @@ sub reload
 	local %ENV = $self->_get_env();
 
 	print "### Reloading node \"$name\"\n";
-	PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata,
+	PostgreSQL::Test::Utils::system_or_bail(
+		'pg_ctl',
+		'--pgdata' => $pgdata,
 		'reload');
 	return;
 }
@@ -1279,8 +1289,11 @@ sub restart
 
 	# -w is now the default but having it here does no harm and helps
 	# compatibility with older versions.
-	$ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-w', '-D',
-		$self->data_dir, '-l', $self->logfile, 'restart');
+	$ret = PostgreSQL::Test::Utils::system_log(
+		'pg_ctl', '--wait',
+		'--pgdata' => $self->data_dir,
+		'--log' => $self->logfile,
+		'restart');
 
 	if ($ret != 0)
 	{
@@ -1318,8 +1331,11 @@ sub promote
 	local %ENV = $self->_get_env();
 
 	print "### Promoting node \"$name\"\n";
-	PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
-		$logfile, 'promote');
+	PostgreSQL::Test::Utils::system_or_bail(
+		'pg_ctl',
+		'--pgdata' => $pgdata,
+		'--log' => $logfile,
+		'promote');
 	return;
 }
 
@@ -1342,8 +1358,11 @@ sub logrotate
 	local %ENV = $self->_get_env();
 
 	print "### Rotating log in node \"$name\"\n";
-	PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
-		$logfile, 'logrotate');
+	PostgreSQL::Test::Utils::system_or_bail(
+		'pg_ctl',
+		'--pgdata' => $pgdata,
+		'--log' => $logfile,
+		'logrotate');
 	return;
 }
 
@@ -2120,7 +2139,9 @@ sub psql
 
 	my @psql_params = (
 		$self->installed_command('psql'),
-		'-XAtq', '-d', $psql_connstr, '-f', '-');
+		'--no-psqlrc', '--no-align', '--tuples-only', '--quiet',
+		'--dbname' => $psql_connstr,
+		'--file' => '-');
 
 	# If the caller wants an array and hasn't passed stdout/stderr
 	# references, allocate temporary ones to capture them so we
@@ -2142,7 +2163,8 @@ sub psql
 	$params{on_error_stop} = 1 unless defined $params{on_error_stop};
 	$params{on_error_die} = 0 unless defined $params{on_error_die};
 
-	push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
+	push @psql_params, '--set' => 'ON_ERROR_STOP=1'
+	  if $params{on_error_stop};
 	push @psql_params, @{ $params{extra_params} }
 	  if defined $params{extra_params};
 
@@ -2168,9 +2190,9 @@ sub psql
 	{
 		local $@;
 		eval {
-			my @ipcrun_opts = (\@psql_params, '<', \$sql);
-			push @ipcrun_opts, '>', $stdout if defined $stdout;
-			push @ipcrun_opts, '2>', $stderr if defined $stderr;
+			my @ipcrun_opts = (\@psql_params, '<' => \$sql);
+			push @ipcrun_opts, '>' => $stdout if defined $stdout;
+			push @ipcrun_opts, '2>' => $stderr if defined $stderr;
 			push @ipcrun_opts, $timeout if defined $timeout;
 
 			IPC::Run::run @ipcrun_opts;
@@ -2325,13 +2347,16 @@ sub background_psql
 
 	my @psql_params = (
 		$self->installed_command('psql'),
-		'-XAtq', '-d', $psql_connstr, '-f', '-');
+		'--no-psqlrc', '--no-align',
+		'--tuples-only', '--quiet',
+		'--dbname' => $psql_connstr,
+		'--file' => '-');
 
 	$params{on_error_stop} = 1 unless defined $params{on_error_stop};
 	$params{wait} = 1 unless defined $params{wait};
 	$timeout = $params{timeout} if defined $params{timeout};
 
-	push @psql_params, '-v', 'ON_ERROR_STOP=1' if $params{on_error_stop};
+	push @psql_params, '--set' => 'ON_ERROR_STOP=1' if $params{on_error_stop};
 	push @psql_params, @{ $params{extra_params} }
 	  if defined $params{extra_params};
 
@@ -2402,7 +2427,8 @@ sub interactive_psql
 
 	my @psql_params = (
 		$self->installed_command('psql'),
-		'-XAt', '-d', $self->connstr($dbname));
+		'--no-psqlrc', '--no-align', '--tuples-only',
+		'--dbname' => $self->connstr($dbname));
 
 	push @psql_params, @{ $params{extra_params} }
 	  if defined $params{extra_params};
@@ -2424,7 +2450,7 @@ sub _pgbench_make_files
 		for my $fn (sort keys %$files)
 		{
 			my $filename = $self->basedir . '/' . $fn;
-			push @file_opts, '-f', $filename;
+			push @file_opts, '--file' => $filename;
 
 			# cleanup file weight
 			$filename =~ s/\@\d+$//;
@@ -2650,8 +2676,9 @@ sub poll_query_until
 	$expected = 't' unless defined($expected);    # default value
 
 	my $cmd = [
-		$self->installed_command('psql'), '-XAt',
-		'-d', $self->connstr($dbname)
+		$self->installed_command('psql'), '--no-psqlrc',
+		'--no-align', '--tuples-only',
+		'--dbname' => $self->connstr($dbname)
 	];
 	my ($stdout, $stderr);
 	my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
@@ -2659,8 +2686,10 @@ sub poll_query_until
 
 	while ($attempts < $max_attempts)
 	{
-		my $result = IPC::Run::run $cmd, '<', \$query,
-		  '>', \$stdout, '2>', \$stderr;
+		my $result = IPC::Run::run $cmd,
+		  '<' => \$query,
+		  '>' => \$stdout,
+		  '2>' => \$stderr;
 
 		chomp($stdout);
 		chomp($stderr);
@@ -3534,15 +3563,17 @@ sub pg_recvlogical_upto
 
 	my @cmd = (
 		$self->installed_command('pg_recvlogical'),
-		'-S', $slot_name, '--dbname', $self->connstr($dbname));
-	push @cmd, '--endpos', $endpos;
-	push @cmd, '-f', '-', '--no-loop', '--start';
+		'--slot' => $slot_name,
+		'--dbname' => $self->connstr($dbname),
+		'--endpos' => $endpos,
+		'--file' => '-',
+		'--no-loop', '--start');
 
 	while (my ($k, $v) = each %plugin_options)
 	{
 		croak "= is not permitted to appear in replication option name"
 		  if ($k =~ qr/=/);
-		push @cmd, "-o", "$k=$v";
+		push @cmd, "--option" => "$k=$v";
 	}
 
 	my $timeout;
@@ -3555,7 +3586,7 @@ sub pg_recvlogical_upto
 	{
 		local $@;
 		eval {
-			IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout);
+			IPC::Run::run(\@cmd, '>' => \$stdout, '2>' => \$stderr, $timeout);
 			$ret = $?;
 		};
 		my $exc_save = $@;
@@ -3669,15 +3700,14 @@ sub create_logical_slot_on_standby
 
 	$handle = IPC::Run::start(
 		[
-			'pg_recvlogical', '-d',
-			$self->connstr($dbname), '-P',
-			'test_decoding', '-S',
-			$slot_name, '--create-slot'
+			'pg_recvlogical',
+			'--dbname' => $self->connstr($dbname),
+			'--plugin' => 'test_decoding',
+			'--slot' => $slot_name,
+			'--create-slot'
 		],
-		'>',
-		\$stdout,
-		'2>',
-		\$stderr);
+		'>' => \$stdout,
+		'2>' => \$stderr);
 
 	# Arrange for the xl_running_xacts record for which pg_recvlogical is
 	# waiting.
diff --git a/src/test/perl/PostgreSQL/Test/Kerberos.pm b/src/test/perl/PostgreSQL/Test/Kerberos.pm
index 118ea1e103e..b72dd2fbaf4 100644
--- a/src/test/perl/PostgreSQL/Test/Kerberos.pm
+++ b/src/test/perl/PostgreSQL/Test/Kerberos.pm
@@ -104,7 +104,7 @@ sub new
 	my ($host, $hostaddr, $realm) = @_;
 
 	my ($stdout, $krb5_version);
-	run_log [ $krb5_config, '--version' ], '>', \$stdout
+	run_log [ $krb5_config, '--version' ], '>' => \$stdout
 	  or BAIL_OUT("could not execute krb5-config");
 	BAIL_OUT("Heimdal is not supported") if $stdout =~ m/heimdal/;
 	$stdout =~ m/Kerberos 5 release ([0-9]+\.[0-9]+)/
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index efe0321a4ef..d1ad131eadf 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -20,7 +20,7 @@ PostgreSQL::Test::Utils - helper module for writing PostgreSQL's C<prove> tests.
   command_fails(['initdb', '--invalid-option'],
               'command fails with invalid option');
   my $tempdir = PostgreSQL::Test::Utils::tempdir;
-  command_ok('initdb', '-D', $tempdir);
+  command_ok('initdb', '--pgdata' => $tempdir);
 
   # Miscellanea
   print "on Windows" if $PostgreSQL::Test::Utils::windows_os;
@@ -333,7 +333,7 @@ sub has_wal_read_bug
 	return
 		 $Config{osname} eq 'linux'
 	  && $Config{archname} =~ /^sparc/
-	  && !run_log([ qw(df -x ext4), $tmp_check ], '>', '/dev/null', '2>&1');
+	  && !run_log([ qw(df -x ext4), $tmp_check ], '&>' => '/dev/null');
 }
 
 =pod
@@ -419,7 +419,7 @@ sub run_command
 {
 	my ($cmd) = @_;
 	my ($stdout, $stderr);
-	my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+	my $result = IPC::Run::run $cmd, '>' => \$stdout, '2>' => \$stderr;
 	chomp($stdout);
 	chomp($stderr);
 	return ($stdout, $stderr);
@@ -723,8 +723,9 @@ sub scan_server_header
 	my ($header_path, $regexp) = @_;
 
 	my ($stdout, $stderr);
-	my $result = IPC::Run::run [ 'pg_config', '--includedir-server' ], '>',
-	  \$stdout, '2>', \$stderr
+	my $result = IPC::Run::run [ 'pg_config', '--includedir-server' ],
+	  '>' => \$stdout,
+	  '2>' => \$stderr
 	  or die "could not execute pg_config";
 	chomp($stdout);
 	$stdout =~ s/\r$//;
@@ -761,8 +762,9 @@ sub check_pg_config
 {
 	my ($regexp) = @_;
 	my ($stdout, $stderr);
-	my $result = IPC::Run::run [ 'pg_config', '--includedir' ], '>',
-	  \$stdout, '2>', \$stderr
+	my $result = IPC::Run::run [ 'pg_config', '--includedir' ],
+	  '>' => \$stdout,
+	  '2>' => \$stderr
 	  or die "could not execute pg_config";
 	chomp($stdout);
 	$stdout =~ s/\r$//;
@@ -925,8 +927,9 @@ sub program_help_ok
 	my ($cmd) = @_;
 	my ($stdout, $stderr);
 	print("# Running: $cmd --help\n");
-	my $result = IPC::Run::run [ $cmd, '--help' ], '>', \$stdout, '2>',
-	  \$stderr;
+	my $result = IPC::Run::run [ $cmd, '--help' ],
+	  '>' => \$stdout,
+	  '2>' => \$stderr;
 	ok($result, "$cmd --help exit code 0");
 	isnt($stdout, '', "$cmd --help goes to stdout");
 	is($stderr, '', "$cmd --help nothing to stderr");
@@ -956,8 +959,9 @@ sub program_version_ok
 	my ($cmd) = @_;
 	my ($stdout, $stderr);
 	print("# Running: $cmd --version\n");
-	my $result = IPC::Run::run [ $cmd, '--version' ], '>', \$stdout, '2>',
-	  \$stderr;
+	my $result = IPC::Run::run [ $cmd, '--version' ],
+	  '>' => \$stdout,
+	  '2>' => \$stderr;
 	ok($result, "$cmd --version exit code 0");
 	isnt($stdout, '', "$cmd --version goes to stdout");
 	is($stderr, '', "$cmd --version nothing to stderr");
@@ -979,9 +983,9 @@ sub program_options_handling_ok
 	my ($cmd) = @_;
 	my ($stdout, $stderr);
 	print("# Running: $cmd --not-a-valid-option\n");
-	my $result = IPC::Run::run [ $cmd, '--not-a-valid-option' ], '>',
-	  \$stdout,
-	  '2>', \$stderr;
+	my $result = IPC::Run::run [ $cmd, '--not-a-valid-option' ],
+	  '>' => \$stdout,
+	  '2>' => \$stderr;
 	ok(!$result, "$cmd with invalid option nonzero exit code");
 	isnt($stderr, '', "$cmd with invalid option prints error message");
 	return;
@@ -1002,7 +1006,7 @@ sub command_like
 	my ($cmd, $expected_stdout, $test_name) = @_;
 	my ($stdout, $stderr);
 	print("# Running: " . join(" ", @{$cmd}) . "\n");
-	my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+	my $result = IPC::Run::run $cmd, '>' => \$stdout, '2>' => \$stderr;
 	ok($result, "$test_name: exit code 0");
 	is($stderr, '', "$test_name: no stderr");
 	like($stdout, $expected_stdout, "$test_name: matches");
@@ -1031,7 +1035,7 @@ sub command_like_safe
 	my $stdoutfile = File::Temp->new();
 	my $stderrfile = File::Temp->new();
 	print("# Running: " . join(" ", @{$cmd}) . "\n");
-	my $result = IPC::Run::run $cmd, '>', $stdoutfile, '2>', $stderrfile;
+	my $result = IPC::Run::run $cmd, '>' => $stdoutfile, '2>' => $stderrfile;
 	$stdout = slurp_file($stdoutfile);
 	$stderr = slurp_file($stderrfile);
 	ok($result, "$test_name: exit code 0");
@@ -1055,7 +1059,7 @@ sub command_fails_like
 	my ($cmd, $expected_stderr, $test_name) = @_;
 	my ($stdout, $stderr);
 	print("# Running: " . join(" ", @{$cmd}) . "\n");
-	my $result = IPC::Run::run $cmd, '>', \$stdout, '2>', \$stderr;
+	my $result = IPC::Run::run $cmd, '>' => \$stdout, '2>' => \$stderr;
 	ok(!$result, "$test_name: exit code not 0");
 	like($stderr, $expected_stderr, "$test_name: matches");
 	return;
@@ -1093,7 +1097,7 @@ sub command_checks_all
 	# run command
 	my ($stdout, $stderr);
 	print("# Running: " . join(" ", @{$cmd}) . "\n");
-	IPC::Run::run($cmd, '>', \$stdout, '2>', \$stderr);
+	IPC::Run::run($cmd, '>' => \$stdout, '2>' => \$stderr);
 
 	# See http://perldoc.perl.org/perlvar.html#%24CHILD_ERROR
 	my $ret = $?;
diff --git a/src/test/postmaster/t/002_connection_limits.pl b/src/test/postmaster/t/002_connection_limits.pl
index fb89a76c005..85f5ef03dec 100644
--- a/src/test/postmaster/t/002_connection_limits.pl
+++ b/src/test/postmaster/t/002_connection_limits.pl
@@ -14,7 +14,8 @@ use Test::More;
 my $node = PostgreSQL::Test::Cluster->new('primary');
 $node->init(
 	'auth_extra' => [
-		'--create-role', 'regress_regular,regress_reserved,regress_superuser'
+		'--create-role' =>
+		  'regress_regular,regress_reserved,regress_superuser',
 	]);
 $node->append_conf('postgresql.conf', "max_connections = 6");
 $node->append_conf('postgresql.conf', "reserved_connections = 2");
@@ -43,7 +44,7 @@ sub background_psql_as_user
 	return $node->background_psql(
 		'postgres',
 		on_error_die => 1,
-		extra_params => [ '-U', $user ]);
+		extra_params => [ '--username' => $user ]);
 }
 
 # Like connect_fails(), except that we also wait for the failed backend to
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index 3945f00ab88..ccd8417d449 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -14,7 +14,7 @@ my $node_primary = PostgreSQL::Test::Cluster->new('primary');
 # and it needs proper authentication configuration.
 $node_primary->init(
 	allows_streaming => 1,
-	auth_extra => [ '--create-role', 'repl_role' ]);
+	auth_extra => [ '--create-role' => 'repl_role' ]);
 $node_primary->start;
 my $backup_name = 'my_backup';
 
@@ -146,9 +146,13 @@ sub test_target_session_attrs
 	# we connected to.  Note we must pass the SQL command via the command
 	# line not stdin, else Perl may spit up trying to write to stdin of
 	# an already-failed psql process.
-	my ($ret, $stdout, $stderr) =
-	  $node1->psql('postgres', undef,
-		extra_params => [ '-d', $connstr, '-c', 'SHOW port;' ]);
+	my ($ret, $stdout, $stderr) = $node1->psql(
+		'postgres',
+		undef,
+		extra_params => [
+			'--dbname' => $connstr,
+			'--command' => 'SHOW port;',
+		]);
 	if ($status == 0)
 	{
 		is( $status == $ret && $stdout eq $target_port,
@@ -257,26 +261,26 @@ my $connstr_db = "$connstr_common replication=database dbname=postgres";
 my ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW ALL;',
 	on_error_die => 1,
-	extra_params => [ '-d', $connstr_rep ]);
+	extra_params => [ '--dbname' => $connstr_rep ]);
 ok($ret == 0, "SHOW ALL with replication role and physical replication");
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW ALL;',
 	on_error_die => 1,
-	extra_params => [ '-d', $connstr_db ]);
+	extra_params => [ '--dbname' => $connstr_db ]);
 ok($ret == 0, "SHOW ALL with replication role and logical replication");
 
 # Test SHOW with a user-settable parameter
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW work_mem;',
 	on_error_die => 1,
-	extra_params => [ '-d', $connstr_rep ]);
+	extra_params => [ '--dbname' => $connstr_rep ]);
 ok( $ret == 0,
 	"SHOW with user-settable parameter, replication role and physical replication"
 );
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW work_mem;',
 	on_error_die => 1,
-	extra_params => [ '-d', $connstr_db ]);
+	extra_params => [ '--dbname' => $connstr_db ]);
 ok( $ret == 0,
 	"SHOW with user-settable parameter, replication role and logical replication"
 );
@@ -285,14 +289,14 @@ ok( $ret == 0,
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW primary_conninfo;',
 	on_error_die => 1,
-	extra_params => [ '-d', $connstr_rep ]);
+	extra_params => [ '--dbname' => $connstr_rep ]);
 ok( $ret == 0,
 	"SHOW with superuser-settable parameter, replication role and physical replication"
 );
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres', 'SHOW primary_conninfo;',
 	on_error_die => 1,
-	extra_params => [ '-d', $connstr_db ]);
+	extra_params => [ '--dbname' => $connstr_db ]);
 ok( $ret == 0,
 	"SHOW with superuser-settable parameter, replication role and logical replication"
 );
@@ -304,7 +308,7 @@ my $slotname = 'test_read_replication_slot_physical';
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres',
 	'READ_REPLICATION_SLOT non_existent_slot;',
-	extra_params => [ '-d', $connstr_rep ]);
+	extra_params => [ '--dbname' => $connstr_rep ]);
 ok($ret == 0, "READ_REPLICATION_SLOT exit code 0 on success");
 like($stdout, qr/^\|\|$/,
 	"READ_REPLICATION_SLOT returns NULL values if slot does not exist");
@@ -312,12 +316,12 @@ like($stdout, qr/^\|\|$/,
 $node_primary->psql(
 	'postgres',
 	"CREATE_REPLICATION_SLOT $slotname PHYSICAL RESERVE_WAL;",
-	extra_params => [ '-d', $connstr_rep ]);
+	extra_params => [ '--dbname' => $connstr_rep ]);
 
 ($ret, $stdout, $stderr) = $node_primary->psql(
 	'postgres',
 	"READ_REPLICATION_SLOT $slotname;",
-	extra_params => [ '-d', $connstr_rep ]);
+	extra_params => [ '--dbname' => $connstr_rep ]);
 ok($ret == 0, "READ_REPLICATION_SLOT success with existing slot");
 like($stdout, qr/^physical\|[^|]*\|1$/,
 	"READ_REPLICATION_SLOT returns tuple with slot information");
@@ -325,7 +329,7 @@ like($stdout, qr/^physical\|[^|]*\|1$/,
 $node_primary->psql(
 	'postgres',
 	"DROP_REPLICATION_SLOT $slotname;",
-	extra_params => [ '-d', $connstr_rep ]);
+	extra_params => [ '--dbname' => $connstr_rep ]);
 
 note "switching to physical replication slot";
 
@@ -594,16 +598,14 @@ my $sigchld_bb_timeout =
 my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
 my $sigchld_bb = IPC::Run::start(
 	[
-		'psql', '-X', '-c', "BASE_BACKUP (CHECKPOINT 'fast', MAX_RATE 32);",
-		'-c', 'SELECT pg_backup_stop()',
-		'-d', $connstr
+		'psql', '--no-psqlrc',
+		'--command' => "BASE_BACKUP (CHECKPOINT 'fast', MAX_RATE 32);",
+		'--command' => 'SELECT pg_backup_stop()',
+		'--dbname' => $connstr
 	],
-	'<',
-	\$sigchld_bb_stdin,
-	'>',
-	\$sigchld_bb_stdout,
-	'2>',
-	\$sigchld_bb_stderr,
+	'<' => \$sigchld_bb_stdin,
+	'>' => \$sigchld_bb_stdout,
+	'2>' => \$sigchld_bb_stderr,
 	$sigchld_bb_timeout);
 
 # The cancellation is issued once the database files are streamed and
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index 9f469101fd4..a5678bc4dc4 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -149,8 +149,11 @@ SKIP:
 
 	my $pg_recvlogical = IPC::Run::start(
 		[
-			'pg_recvlogical', '-d', $node_primary->connstr('otherdb'),
-			'-S', 'otherdb_slot', '-f', '-', '--start'
+			'pg_recvlogical',
+			'--dbname' => $node_primary->connstr('otherdb'),
+			'--slot' => 'otherdb_slot',
+			'--file' => '-',
+			'--start'
 		]);
 	$node_primary->poll_query_until('otherdb',
 		"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)"
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index cd848918d00..4e60806563f 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -34,30 +34,27 @@ $node->safe_psql(
 my ($killme_stdin, $killme_stdout, $killme_stderr) = ('', '', '');
 my $killme = IPC::Run::start(
 	[
-		'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
-		$node->connstr('postgres')
+		'psql', '--no-psqlrc', '--quiet', '--no-align', '--tuples-only',
+		'--set' => 'ON_ERROR_STOP=1',
+		'--file' => '-',
+		'--dbname' => $node->connstr('postgres')
 	],
-	'<',
-	\$killme_stdin,
-	'>',
-	\$killme_stdout,
-	'2>',
-	\$killme_stderr,
+	'<' => \$killme_stdin,
+	'>' => \$killme_stdout,
+	'2>' => \$killme_stderr,
 	$psql_timeout);
 
 # Need a second psql to check if crash-restart happened.
 my ($monitor_stdin, $monitor_stdout, $monitor_stderr) = ('', '', '');
 my $monitor = IPC::Run::start(
 	[
-		'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
-		$node->connstr('postgres')
+		'psql', '--no-psqlrc', '--quiet', '--no-align', '--tuples-only',
+		'--set' => 'ON_ERROR_STOP=1',
+		'--file', '-', '--dbname' => $node->connstr('postgres')
 	],
-	'<',
-	\$monitor_stdin,
-	'>',
-	\$monitor_stdout,
-	'2>',
-	\$monitor_stderr,
+	'<' => \$monitor_stdin,
+	'>' => \$monitor_stdout,
+	'2>' => \$monitor_stderr,
 	$psql_timeout);
 
 #create table, insert row that should survive
diff --git a/src/test/recovery/t/017_shm.pl b/src/test/recovery/t/017_shm.pl
index e2e85d471fe..c73aa3f0c2c 100644
--- a/src/test/recovery/t/017_shm.pl
+++ b/src/test/recovery/t/017_shm.pl
@@ -29,11 +29,11 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir;
 
 # Log "ipcs" diffs on a best-effort basis, swallowing any error.
 my $ipcs_before = "$tempdir/ipcs_before";
-eval { run_log [ 'ipcs', '-am' ], '>', $ipcs_before; };
+eval { run_log [ 'ipcs', '-am' ], '>' => $ipcs_before; };
 
 sub log_ipcs
 {
-	eval { run_log [ 'ipcs', '-am' ], '|', [ 'diff', $ipcs_before, '-' ] };
+	eval { run_log [ 'ipcs', '-am' ], '|' => [ 'diff', $ipcs_before, '-' ] };
 	return;
 }
 
@@ -122,15 +122,13 @@ my $slow_query = 'SELECT wait_pid(pg_backend_pid())';
 my ($stdout, $stderr);
 my $slow_client = IPC::Run::start(
 	[
-		'psql', '-X', '-qAt', '-d', $gnat->connstr('postgres'),
-		'-c', $slow_query
+		'psql', '--no-psqlrc', '--quiet', '--no-align', '--tuples-only',
+		'--dbname' => $gnat->connstr('postgres'),
+		'--command' => $slow_query
 	],
-	'<',
-	\undef,
-	'>',
-	\$stdout,
-	'2>',
-	\$stderr,
+	'<' => \undef,
+	'>' => \$stdout,
+	'2>' => \$stderr,
 	IPC::Run::timeout(5 * $PostgreSQL::Test::Utils::timeout_default));
 ok( $gnat->poll_query_until(
 		'postgres',
diff --git a/src/test/recovery/t/021_row_visibility.pl b/src/test/recovery/t/021_row_visibility.pl
index 53c0d108a3a..42740745bfd 100644
--- a/src/test/recovery/t/021_row_visibility.pl
+++ b/src/test/recovery/t/021_row_visibility.pl
@@ -38,24 +38,26 @@ my $psql_timeout =
 # to check uncommitted changes being replicated and such.
 my %psql_primary = (stdin => '', stdout => '', stderr => '');
 $psql_primary{run} = IPC::Run::start(
-	[ 'psql', '-XA', '-f', '-', '-d', $node_primary->connstr('postgres') ],
-	'<',
-	\$psql_primary{stdin},
-	'>',
-	\$psql_primary{stdout},
-	'2>',
-	\$psql_primary{stderr},
+	[
+		'psql', '--no-psqlrc', '--no-align',
+		'--file' => '-',
+		'--dbname' => $node_primary->connstr('postgres'),
+	],
+	'<' => \$psql_primary{stdin},
+	'>' => \$psql_primary{stdout},
+	'2>' => \$psql_primary{stderr},
 	$psql_timeout);
 
 my %psql_standby = ('stdin' => '', 'stdout' => '', 'stderr' => '');
 $psql_standby{run} = IPC::Run::start(
-	[ 'psql', '-XA', '-f', '-', '-d', $node_standby->connstr('postgres') ],
-	'<',
-	\$psql_standby{stdin},
-	'>',
-	\$psql_standby{stdout},
-	'2>',
-	\$psql_standby{stderr},
+	[
+		'psql', '--no-psqlrc', '--no-align',
+		'--file' => '-',
+		'--dbname' => $node_standby->connstr('postgres'),
+	],
+	'<' => \$psql_standby{stdin},
+	'>' => \$psql_standby{stdout},
+	'2>' => \$psql_standby{stderr},
 	$psql_timeout);
 
 #
diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl
index 483a416723f..50def031c96 100644
--- a/src/test/recovery/t/022_crash_temp_files.pl
+++ b/src/test/recovery/t/022_crash_temp_files.pl
@@ -38,15 +38,14 @@ $node->safe_psql('postgres', q[CREATE TABLE tab_crash (a integer UNIQUE);]);
 my ($killme_stdin, $killme_stdout, $killme_stderr) = ('', '', '');
 my $killme = IPC::Run::start(
 	[
-		'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
-		$node->connstr('postgres')
+		'psql', '--no-psqlrc', '--quiet', '--no-align', '--tuples-only',
+		'--set' => 'ON_ERROR_STOP=1',
+		'--file' => '-',
+		'--dbname' => $node->connstr('postgres')
 	],
-	'<',
-	\$killme_stdin,
-	'>',
-	\$killme_stdout,
-	'2>',
-	\$killme_stderr,
+	'<' => \$killme_stdin,
+	'>' => \$killme_stdout,
+	'2>' => \$killme_stderr,
 	$psql_timeout);
 
 # Get backend pid
@@ -66,15 +65,14 @@ $killme_stderr = '';
 my ($killme_stdin2, $killme_stdout2, $killme_stderr2) = ('', '', '');
 my $killme2 = IPC::Run::start(
 	[
-		'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
-		$node->connstr('postgres')
+		'psql', '--no-psqlrc', '--quiet', '--no-align', '--tuples-only',
+		'--set' => 'ON_ERROR_STOP=1',
+		'--file' => '-',
+		'--dbname' => $node->connstr('postgres')
 	],
-	'<',
-	\$killme_stdin2,
-	'>',
-	\$killme_stdout2,
-	'2>',
-	\$killme_stderr2,
+	'<' => \$killme_stdin2,
+	'>' => \$killme_stdout2,
+	'2>' => \$killme_stderr2,
 	$psql_timeout);
 
 # Insert one tuple and leave the transaction open
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index ddb7223b337..492ef115ba4 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -36,24 +36,26 @@ my $psql_timeout = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default);
 
 my %psql_primary = (stdin => '', stdout => '', stderr => '');
 $psql_primary{run} = IPC::Run::start(
-	[ 'psql', '-XA', '-f', '-', '-d', $node_primary->connstr('postgres') ],
-	'<',
-	\$psql_primary{stdin},
-	'>',
-	\$psql_primary{stdout},
-	'2>',
-	\$psql_primary{stderr},
+	[
+		'psql', '--no-psqlrc', '--no-align',
+		'--file' => '-',
+		'--dbname' => $node_primary->connstr('postgres')
+	],
+	'<' => \$psql_primary{stdin},
+	'>' => \$psql_primary{stdout},
+	'2>' => \$psql_primary{stderr},
 	$psql_timeout);
 
 my %psql_standby = ('stdin' => '', 'stdout' => '', 'stderr' => '');
 $psql_standby{run} = IPC::Run::start(
-	[ 'psql', '-XA', '-f', '-', '-d', $node_standby->connstr('postgres') ],
-	'<',
-	\$psql_standby{stdin},
-	'>',
-	\$psql_standby{stdout},
-	'2>',
-	\$psql_standby{stderr},
+	[
+		'psql', '--no-psqlrc', '--no-align',
+		'--file' => '-',
+		'--dbname' => $node_standby->connstr('postgres')
+	],
+	'<' => \$psql_standby{stdin},
+	'>' => \$psql_standby{stdout},
+	'2>' => \$psql_standby{stderr},
 	$psql_timeout);
 
 
diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl
index 8903177d883..c31cab06f1c 100644
--- a/src/test/recovery/t/035_standby_logical_decoding.pl
+++ b/src/test/recovery/t/035_standby_logical_decoding.pl
@@ -75,18 +75,17 @@ sub make_slot_active
 	my $active_slot = $slot_prefix . 'activeslot';
 	$slot_user_handle = IPC::Run::start(
 		[
-			'pg_recvlogical', '-d',
-			$node->connstr('testdb'), '-S',
-			qq($active_slot), '-o',
-			'include-xids=0', '-o',
-			'skip-empty-xacts=1', '--no-loop',
-			'--start', '-f',
-			'-'
+			'pg_recvlogical',
+			'--dbname' => $node->connstr('testdb'),
+			'--slot' => $active_slot,
+			'--option' => 'include-xids=0',
+			'--option' => 'skip-empty-xacts=1',
+			'--file' => '-',
+			'--no-loop',
+			'--start',
 		],
-		'>',
-		$to_stdout,
-		'2>',
-		$to_stderr,
+		'>' => $to_stdout,
+		'2>' => $to_stderr,
 		IPC::Run::timeout($default_timeout));
 
 	if ($wait)
@@ -333,13 +332,14 @@ my %psql_subscriber = (
 	'subscriber_stdout' => '',
 	'subscriber_stderr' => '');
 $psql_subscriber{run} = IPC::Run::start(
-	[ 'psql', '-XA', '-f', '-', '-d', $node_subscriber->connstr('postgres') ],
-	'<',
-	\$psql_subscriber{subscriber_stdin},
-	'>',
-	\$psql_subscriber{subscriber_stdout},
-	'2>',
-	\$psql_subscriber{subscriber_stderr},
+	[
+		'psql', '--no-psqlrc', '--no-align',
+		'--file' => '-',
+		'--dbname' => $node_subscriber->connstr('postgres')
+	],
+	'<' => \$psql_subscriber{subscriber_stdin},
+	'>' => \$psql_subscriber{subscriber_stdout},
+	'2>' => \$psql_subscriber{subscriber_stderr},
 	IPC::Run::timeout($default_timeout));
 
 ##################################################
diff --git a/src/test/recovery/t/040_standby_failover_slots_sync.pl b/src/test/recovery/t/040_standby_failover_slots_sync.pl
index 50388a494d6..8f65142909a 100644
--- a/src/test/recovery/t/040_standby_failover_slots_sync.pl
+++ b/src/test/recovery/t/040_standby_failover_slots_sync.pl
@@ -18,7 +18,7 @@ my $publisher = PostgreSQL::Test::Cluster->new('publisher');
 # This is only needed on Windows machines that don't use UNIX sockets.
 $publisher->init(
 	allows_streaming => 'logical',
-	auth_extra => [ '--create-role', 'repl_role' ]);
+	auth_extra => [ '--create-role' => 'repl_role' ]);
 # Disable autovacuum to avoid generating xid during stats update as otherwise
 # the new XID could then be replicated to standby at some random point making
 # slots at primary lag behind standby during slot sync.
diff --git a/src/test/recovery/t/041_checkpoint_at_promote.pl b/src/test/recovery/t/041_checkpoint_at_promote.pl
index cc5aa80af40..cb63ac8d5c9 100644
--- a/src/test/recovery/t/041_checkpoint_at_promote.pl
+++ b/src/test/recovery/t/041_checkpoint_at_promote.pl
@@ -124,15 +124,14 @@ my $psql_timeout = IPC::Run::timer(3600);
 my ($killme_stdin, $killme_stdout, $killme_stderr) = ('', '', '');
 my $killme = IPC::Run::start(
 	[
-		'psql', '-XAtq', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
-		$node_standby->connstr('postgres')
+		'psql', '--no-psqlrc', '--no-align', '--tuples-only', '--quiet',
+		'--set' => 'ON_ERROR_STOP=1',
+		'--file' => '-',
+		'--dbname' => $node_standby->connstr('postgres')
 	],
-	'<',
-	\$killme_stdin,
-	'>',
-	\$killme_stdout,
-	'2>',
-	\$killme_stderr,
+	'<' => \$killme_stdin,
+	'>' => \$killme_stdout,
+	'2>' => \$killme_stderr,
 	$psql_timeout);
 $killme_stdin .= q[
 SELECT pg_backend_pid();
diff --git a/src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl b/src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl
index c329d7b06d4..bd70c916d9b 100644
--- a/src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl
+++ b/src/tools/pg_bsd_indent/t/001_pg_bsd_indent.pl
@@ -51,7 +51,7 @@ while (my $test_src = glob("$src_dir/tests/*.0"))
 	# check result matches, adding any diff to $diffs_file
 	my $result =
 	  run_log([ 'diff', @diffopts, "$test_src.stdout", "$test.out" ],
-		'>>', $diffs_file);
+		'>>' => $diffs_file);
 	ok($result, "pg_bsd_indent output matches for $test");
 }
 
-- 
2.43.0

>From 1b02c5e26a15b12693efcaabb538f220263c4230 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dagfinn=20Ilmari=20Manns=C3=A5ker?= <ilm...@ilmari.org>
Date: Thu, 30 Jan 2025 12:52:01 +0000
Subject: [PATCH 2/2] perl: remove pointless quotes from hash keys

The quotes are removed both inside curly braces and before fat commas.

Some nominally-unnecessary quotes are left in place where there's a
mix of keys that need and don't need quoting in the same hash.
---
 contrib/auto_explain/t/001_auto_explain.pl    |  2 +-
 contrib/basebackup_to_shell/t/001_basic.pl    |  6 +-
 doc/src/sgml/generate-keywords-table.pl       | 14 +--
 src/backend/catalog/Catalog.pm                | 14 +--
 src/backend/parser/check_keywords.pl          |  8 +-
 src/backend/snowball/snowball_create.pl       |  4 +-
 src/backend/utils/Gen_fmgrtab.pl              |  4 +-
 src/backend/utils/mb/Unicode/UCS_to_most.pl   | 54 +++++-----
 src/bin/pg_basebackup/t/010_pg_basebackup.pl  |  2 +-
 .../t/011_in_place_tablespace.pl              |  2 +-
 src/bin/pg_basebackup/t/020_pg_receivewal.pl  |  8 +-
 src/bin/pg_basebackup/t/030_pg_recvlogical.pl |  4 +-
 src/bin/pg_combinebackup/t/008_promote.pl     |  2 +-
 src/bin/pg_dump/t/002_pg_dump.pl              | 10 +-
 src/bin/pg_upgrade/t/002_pg_upgrade.pl        |  2 +-
 src/bin/pg_verifybackup/t/003_corruption.pl   | 98 +++++++++----------
 src/bin/pg_verifybackup/t/008_untar.pl        | 54 +++++-----
 src/bin/pg_verifybackup/t/009_extract.pl      | 40 ++++----
 src/bin/pg_verifybackup/t/010_client_untar.pl | 68 ++++++-------
 src/interfaces/ecpg/preproc/parse.pl          | 50 +++++-----
 src/interfaces/libpq/t/001_uri.pl             |  2 +-
 src/pl/plperl/plc_perlboot.pl                 |  4 +-
 src/test/authentication/t/001_password.pl     | 22 ++---
 src/test/authentication/t/002_saslprep.pl     |  2 +-
 .../authentication/t/004_file_inclusion.pl    |  8 +-
 src/test/ldap/LdapServer.pm                   |  4 +-
 src/test/ldap/t/001_auth.pl                   | 42 ++++----
 src/test/ldap/t/002_bindpasswd.pl             |  2 +-
 .../t/001_mutated_bindpasswd.pl               |  2 +-
 .../modules/oauth_validator/t/001_server.pl   | 10 +-
 .../modules/oauth_validator/t/002_client.pl   |  6 +-
 .../modules/oauth_validator/t/OAuth/Server.pm | 18 ++--
 src/test/modules/test_pg_dump/t/001_base.pl   |  2 +-
 .../perl/PostgreSQL/Test/AdjustUpgrade.pm     |  2 +-
 .../perl/PostgreSQL/Test/BackgroundPsql.pm    | 10 +-
 src/test/perl/PostgreSQL/Test/Cluster.pm      | 18 ++--
 src/test/perl/PostgreSQL/Test/Kerberos.pm     |  6 +-
 src/test/perl/PostgreSQL/Test/Utils.pm        |  4 +-
 .../postmaster/t/002_connection_limits.pl     |  2 +-
 src/test/recovery/t/001_stream_rep.pl         |  2 +-
 src/test/recovery/t/006_logical_decoding.pl   |  4 +-
 .../t/010_logical_decoding_timelines.pl       | 10 +-
 src/test/recovery/t/021_row_visibility.pl     |  2 +-
 src/test/recovery/t/032_relfilenode_reuse.pl  |  2 +-
 .../t/035_standby_logical_decoding.pl         | 12 +--
 src/test/ssl/t/002_scram.pl                   |  4 +-
 src/test/ssl/t/003_sslinfo.pl                 |  6 +-
 src/test/subscription/t/027_nosuperuser.pl    |  6 +-
 src/tools/win32tzlist.pl                      | 14 +--
 49 files changed, 337 insertions(+), 337 deletions(-)

diff --git a/contrib/auto_explain/t/001_auto_explain.pl b/contrib/auto_explain/t/001_auto_explain.pl
index 80c0c19af58..25252604b7d 100644
--- a/contrib/auto_explain/t/001_auto_explain.pl
+++ b/contrib/auto_explain/t/001_auto_explain.pl
@@ -28,7 +28,7 @@ sub query_log
 }
 
 my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init('auth_extra' => [ '--create-role' => 'regress_user1' ]);
+$node->init(auth_extra => [ '--create-role' => 'regress_user1' ]);
 $node->append_conf('postgresql.conf',
 	"session_preload_libraries = 'auto_explain'");
 $node->append_conf('postgresql.conf', "auto_explain.log_min_duration = 0");
diff --git a/contrib/basebackup_to_shell/t/001_basic.pl b/contrib/basebackup_to_shell/t/001_basic.pl
index 3ee4603bd3a..68ba69e034b 100644
--- a/contrib/basebackup_to_shell/t/001_basic.pl
+++ b/contrib/basebackup_to_shell/t/001_basic.pl
@@ -10,7 +10,7 @@ use Test::More;
 # For testing purposes, we just want basebackup_to_shell to write standard
 # input to a file.  However, Windows doesn't have "cat" or any equivalent, so
 # we use "gzip" for this purpose.
-my $gzip = $ENV{'GZIP_PROGRAM'};
+my $gzip = $ENV{GZIP_PROGRAM};
 if (!defined $gzip || $gzip eq '')
 {
 	plan skip_all => 'gzip not available';
@@ -24,8 +24,8 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
 # Make sure pg_hba.conf is set up to allow connections from backupuser.
 # This is only needed on Windows machines that don't use UNIX sockets.
 $node->init(
-	'allows_streaming' => 1,
-	'auth_extra' => [ '--create-role' => 'backupuser' ]);
+	allows_streaming => 1,
+	auth_extra => [ '--create-role' => 'backupuser' ]);
 
 $node->append_conf('postgresql.conf',
 	"shared_preload_libraries = 'basebackup_to_shell'");
diff --git a/doc/src/sgml/generate-keywords-table.pl b/doc/src/sgml/generate-keywords-table.pl
index 76c4689872f..2b3ee8793c6 100644
--- a/doc/src/sgml/generate-keywords-table.pl
+++ b/doc/src/sgml/generate-keywords-table.pl
@@ -43,7 +43,7 @@ while (<$fh>)
 {
 	if (/^PG_KEYWORD\("(\w+)", \w+, (\w+)_KEYWORD\, (\w+)\)/)
 	{
-		$keywords{ uc $1 }{'pg'}{ lc $2 } = 1;
+		$keywords{ uc $1 }{pg}{ lc $2 } = 1;
 		$as_keywords{ uc $1 } = 1 if $3 eq 'AS_LABEL';
 	}
 }
@@ -94,19 +94,19 @@ foreach my $word (sort keys %keywords)
 	print "    <entry><token>$printword</token></entry>\n";
 
 	print "    <entry>";
-	if ($keywords{$word}{pg}{'unreserved'})
+	if ($keywords{$word}{pg}{unreserved})
 	{
 		print "non-reserved";
 	}
-	elsif ($keywords{$word}{pg}{'col_name'})
+	elsif ($keywords{$word}{pg}{col_name})
 	{
 		print "non-reserved (cannot be function or type)";
 	}
-	elsif ($keywords{$word}{pg}{'type_func_name'})
+	elsif ($keywords{$word}{pg}{type_func_name})
 	{
 		print "reserved (can be function or type)";
 	}
-	elsif ($keywords{$word}{pg}{'reserved'})
+	elsif ($keywords{$word}{pg}{reserved})
 	{
 		print "reserved";
 	}
@@ -119,11 +119,11 @@ foreach my $word (sort keys %keywords)
 	foreach my $ver (@sql_versions)
 	{
 		print "    <entry>";
-		if ($keywords{$word}{$ver}{'reserved'})
+		if ($keywords{$word}{$ver}{reserved})
 		{
 			print "reserved";
 		}
-		elsif ($keywords{$word}{$ver}{'nonreserved'})
+		elsif ($keywords{$word}{$ver}{nonreserved})
 		{
 			print "non-reserved";
 		}
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index 5a912549b82..527d9af38ef 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -28,13 +28,13 @@ sub ParseHeader
 	# There are a few types which are given one name in the C source, but a
 	# different name at the SQL level.  These are enumerated here.
 	my %RENAME_ATTTYPE = (
-		'int16' => 'int2',
-		'int32' => 'int4',
-		'int64' => 'int8',
-		'Oid' => 'oid',
-		'NameData' => 'name',
-		'TransactionId' => 'xid',
-		'XLogRecPtr' => 'pg_lsn');
+		int16 => 'int2',
+		int32 => 'int4',
+		int64 => 'int8',
+		Oid => 'oid',
+		NameData => 'name',
+		TransactionId => 'xid',
+		XLogRecPtr => 'pg_lsn');
 
 	my %catalog;
 	my $declaring_attributes = 0;
diff --git a/src/backend/parser/check_keywords.pl b/src/backend/parser/check_keywords.pl
index 2f25b2a1071..177bc5d99e6 100644
--- a/src/backend/parser/check_keywords.pl
+++ b/src/backend/parser/check_keywords.pl
@@ -47,10 +47,10 @@ $, = ' ';     # set output field separator
 $\ = "\n";    # set output record separator
 
 my %keyword_categories;
-$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD';
-$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD';
-$keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD';
-$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD';
+$keyword_categories{unreserved_keyword} = 'UNRESERVED_KEYWORD';
+$keyword_categories{col_name_keyword} = 'COL_NAME_KEYWORD';
+$keyword_categories{type_func_name_keyword} = 'TYPE_FUNC_NAME_KEYWORD';
+$keyword_categories{reserved_keyword} = 'RESERVED_KEYWORD';
 
 open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename");
 
diff --git a/src/backend/snowball/snowball_create.pl b/src/backend/snowball/snowball_create.pl
index dffa8feb769..1f25baa83af 100644
--- a/src/backend/snowball/snowball_create.pl
+++ b/src/backend/snowball/snowball_create.pl
@@ -50,8 +50,8 @@ our @languages = qw(
 # @languages.
 
 our %ascii_languages = (
-	'hindi' => 'english',
-	'russian' => 'english',);
+	hindi => 'english',
+	russian => 'english',);
 
 GetOptions(
 	'depfile' => \$depfile,
diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl
index 247e1c6ab4c..c4db6480674 100644
--- a/src/backend/utils/Gen_fmgrtab.pl
+++ b/src/backend/utils/Gen_fmgrtab.pl
@@ -208,8 +208,8 @@ foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
 # Create the fmgr_builtins table, collect data for fmgr_builtin_oid_index
 print $tfh "\nconst FmgrBuiltin fmgr_builtins[] = {\n";
 my %bmap;
-$bmap{'t'} = 'true';
-$bmap{'f'} = 'false';
+$bmap{t} = 'true';
+$bmap{f} = 'false';
 my @fmgr_builtin_oid_index;
 my $last_builtin_oid = 0;
 my $fmgr_count = 0;
diff --git a/src/backend/utils/mb/Unicode/UCS_to_most.pl b/src/backend/utils/mb/Unicode/UCS_to_most.pl
index b0009692521..4470ed9cd38 100755
--- a/src/backend/utils/mb/Unicode/UCS_to_most.pl
+++ b/src/backend/utils/mb/Unicode/UCS_to_most.pl
@@ -23,33 +23,33 @@ use convutils;
 my $this_script = 'src/backend/utils/mb/Unicode/UCS_to_most.pl';
 
 my %filename = (
-	'WIN866' => 'CP866.TXT',
-	'WIN874' => 'CP874.TXT',
-	'WIN1250' => 'CP1250.TXT',
-	'WIN1251' => 'CP1251.TXT',
-	'WIN1252' => 'CP1252.TXT',
-	'WIN1253' => 'CP1253.TXT',
-	'WIN1254' => 'CP1254.TXT',
-	'WIN1255' => 'CP1255.TXT',
-	'WIN1256' => 'CP1256.TXT',
-	'WIN1257' => 'CP1257.TXT',
-	'WIN1258' => 'CP1258.TXT',
-	'ISO8859_2' => '8859-2.TXT',
-	'ISO8859_3' => '8859-3.TXT',
-	'ISO8859_4' => '8859-4.TXT',
-	'ISO8859_5' => '8859-5.TXT',
-	'ISO8859_6' => '8859-6.TXT',
-	'ISO8859_7' => '8859-7.TXT',
-	'ISO8859_8' => '8859-8.TXT',
-	'ISO8859_9' => '8859-9.TXT',
-	'ISO8859_10' => '8859-10.TXT',
-	'ISO8859_13' => '8859-13.TXT',
-	'ISO8859_14' => '8859-14.TXT',
-	'ISO8859_15' => '8859-15.TXT',
-	'ISO8859_16' => '8859-16.TXT',
-	'KOI8R' => 'KOI8-R.TXT',
-	'KOI8U' => 'KOI8-U.TXT',
-	'GBK' => 'CP936.TXT');
+	WIN866 => 'CP866.TXT',
+	WIN874 => 'CP874.TXT',
+	WIN1250 => 'CP1250.TXT',
+	WIN1251 => 'CP1251.TXT',
+	WIN1252 => 'CP1252.TXT',
+	WIN1253 => 'CP1253.TXT',
+	WIN1254 => 'CP1254.TXT',
+	WIN1255 => 'CP1255.TXT',
+	WIN1256 => 'CP1256.TXT',
+	WIN1257 => 'CP1257.TXT',
+	WIN1258 => 'CP1258.TXT',
+	ISO8859_2 => '8859-2.TXT',
+	ISO8859_3 => '8859-3.TXT',
+	ISO8859_4 => '8859-4.TXT',
+	ISO8859_5 => '8859-5.TXT',
+	ISO8859_6 => '8859-6.TXT',
+	ISO8859_7 => '8859-7.TXT',
+	ISO8859_8 => '8859-8.TXT',
+	ISO8859_9 => '8859-9.TXT',
+	ISO8859_10 => '8859-10.TXT',
+	ISO8859_13 => '8859-13.TXT',
+	ISO8859_14 => '8859-14.TXT',
+	ISO8859_15 => '8859-15.TXT',
+	ISO8859_16 => '8859-16.TXT',
+	KOI8R => 'KOI8-R.TXT',
+	KOI8U => 'KOI8-U.TXT',
+	GBK => 'CP936.TXT');
 
 # make maps for all encodings if not specified
 my @charsets = (scalar(@ARGV) > 0) ? @ARGV : sort keys(%filename);
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 89ff26b6314..7cdd4442755 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -470,7 +470,7 @@ SKIP:
 	$node2->init_from_backup(
 		$node, 'tarbackup2',
 		tar_program => $tar,
-		'tablespace_map' => { $tblspcoid => $realRepTsDir });
+		tablespace_map => { $tblspcoid => $realRepTsDir });
 
 	$node2->start;
 	my $result = $node2->safe_psql('postgres', 'SELECT * FROM test1');
diff --git a/src/bin/pg_basebackup/t/011_in_place_tablespace.pl b/src/bin/pg_basebackup/t/011_in_place_tablespace.pl
index 9e53dada4fa..ec942e54eee 100644
--- a/src/bin/pg_basebackup/t/011_in_place_tablespace.pl
+++ b/src/bin/pg_basebackup/t/011_in_place_tablespace.pl
@@ -17,7 +17,7 @@ my @pg_basebackup_defs =
 
 # Set up an instance.
 my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init('allows_streaming' => 1);
+$node->init(allows_streaming => 1);
 $node->start();
 
 # Create an in-place tablespace.
diff --git a/src/bin/pg_basebackup/t/020_pg_receivewal.pl b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
index 4be96affd7b..499b6e5d298 100644
--- a/src/bin/pg_basebackup/t/020_pg_receivewal.pl
+++ b/src/bin/pg_basebackup/t/020_pg_receivewal.pl
@@ -58,12 +58,12 @@ $primary->command_ok(
 	[ 'pg_receivewal', '--slot' => $slot_name, '--create-slot' ],
 	'creating a replication slot');
 my $slot = $primary->slot($slot_name);
-is($slot->{'slot_type'}, 'physical', 'physical replication slot was created');
-is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null');
+is($slot->{slot_type}, 'physical', 'physical replication slot was created');
+is($slot->{restart_lsn}, '', 'restart LSN of new slot is null');
 $primary->command_ok(
 	[ 'pg_receivewal', '--slot' => $slot_name, '--drop-slot' ],
 	'dropping a replication slot');
-is($primary->slot($slot_name)->{'slot_type'},
+is($primary->slot($slot_name)->{slot_type},
 	'', 'replication slot was removed');
 
 # Generate some WAL.  Use --synchronous at the same time to add more
@@ -318,7 +318,7 @@ $primary->wait_for_catchup($standby);
 # Get a walfilename from before the promotion to make sure it is archived
 # after promotion
 my $standby_slot = $standby->slot($archive_slot);
-my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
+my $replication_slot_lsn = $standby_slot->{restart_lsn};
 
 # pg_walfile_name() is not supported while in recovery, so use the primary
 # to build the segment name.  Both nodes are on the same timeline, so this
diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
index a6e10600161..c5b165cfe13 100644
--- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
+++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
@@ -53,7 +53,7 @@ $node->command_ok(
 	'slot created');
 
 my $slot = $node->slot('test');
-isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
+isnt($slot->{restart_lsn}, '', 'restart lsn is defined for new slot');
 
 $node->psql('postgres', 'CREATE TABLE test_table(x integer)');
 $node->psql('postgres',
@@ -95,7 +95,7 @@ $node->command_ok(
 	'slot with two-phase created');
 
 $slot = $node->slot('test');
-isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
+isnt($slot->{restart_lsn}, '', 'restart lsn is defined for new slot');
 
 $node->safe_psql('postgres',
 	"BEGIN; INSERT INTO test_table values (11); PREPARE TRANSACTION 'test'");
diff --git a/src/bin/pg_combinebackup/t/008_promote.pl b/src/bin/pg_combinebackup/t/008_promote.pl
index 732f6397103..3a15983f4a1 100644
--- a/src/bin/pg_combinebackup/t/008_promote.pl
+++ b/src/bin/pg_combinebackup/t/008_promote.pl
@@ -52,7 +52,7 @@ EOM
 # then stop recovery at some arbitrary LSN, not just when it hits the end of
 # WAL, so use a recovery target.
 my $node2 = PostgreSQL::Test::Cluster->new('node2');
-$node2->init_from_backup($node1, 'backup1', 'has_streaming' => 1);
+$node2->init_from_backup($node1, 'backup1', has_streaming => 1);
 $node2->append_conf('postgresql.conf', <<EOM);
 recovery_target_lsn = '$lsn'
 recovery_target_action = 'pause'
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index c7bffc1b045..03c2d50740c 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -119,7 +119,7 @@ my %pgdump_runs = (
 		# Give coverage for manually compressed blobs.toc files during
 		# restore.
 		compress_cmd => {
-			program => $ENV{'GZIP_PROGRAM'},
+			program => $ENV{GZIP_PROGRAM},
 			args => [ '-f', "$tempdir/compression_gzip_dir/blobs_*.toc", ],
 		},
 		# Verify that only data files were compressed
@@ -147,7 +147,7 @@ my %pgdump_runs = (
 		],
 		# Decompress the generated file to run through the tests.
 		compress_cmd => {
-			program => $ENV{'GZIP_PROGRAM'},
+			program => $ENV{GZIP_PROGRAM},
 			args => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ],
 		},
 	},
@@ -215,7 +215,7 @@ my %pgdump_runs = (
 		],
 		# Decompress the generated file to run through the tests.
 		compress_cmd => {
-			program => $ENV{'LZ4'},
+			program => $ENV{LZ4},
 			args => [
 				'-d', '-f',
 				"$tempdir/compression_lz4_plain.sql.lz4",
@@ -263,7 +263,7 @@ my %pgdump_runs = (
 		# Give coverage for manually compressed blobs.toc files during
 		# restore.
 		compress_cmd => {
-			program => $ENV{'ZSTD'},
+			program => $ENV{ZSTD},
 			args => [
 				'-z', '-f',
 				'--rm', "$tempdir/compression_zstd_dir/blobs_*.toc",
@@ -295,7 +295,7 @@ my %pgdump_runs = (
 		],
 		# Decompress the generated file to run through the tests.
 		compress_cmd => {
-			program => $ENV{'ZSTD'},
+			program => $ENV{ZSTD},
 			args => [
 				'-d', '-f',
 				"$tempdir/compression_zstd_plain.sql.zst", "-o",
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 00051b85035..9438e407bc9 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -164,7 +164,7 @@ push @initdb_params, ('--lc-collate', $original_datcollate);
 push @initdb_params, ('--lc-ctype', $original_datctype);
 
 # add --locale-provider, if supported
-my %provider_name = ('b' => 'builtin', 'i' => 'icu', 'c' => 'libc');
+my %provider_name = (b => 'builtin', i => 'icu', c => 'libc');
 if ($oldnode->pg_version >= 15)
 {
 	push @initdb_params,
diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl
index 84f23b8bc3d..2f280905a22 100644
--- a/src/bin/pg_verifybackup/t/003_corruption.pl
+++ b/src/bin/pg_verifybackup/t/003_corruption.pl
@@ -34,86 +34,86 @@ EOM
 
 my @scenario = (
 	{
-		'name' => 'extra_file',
-		'mutilate' => \&mutilate_extra_file,
-		'fails_like' =>
+		name => 'extra_file',
+		mutilate => \&mutilate_extra_file,
+		fails_like =>
 		  qr/extra_file.*present (on disk|in "[^"]+") but not in the manifest/
 	},
 	{
-		'name' => 'extra_tablespace_file',
-		'mutilate' => \&mutilate_extra_tablespace_file,
-		'fails_like' =>
+		name => 'extra_tablespace_file',
+		mutilate => \&mutilate_extra_tablespace_file,
+		fails_like =>
 		  qr/extra_ts_file.*present (on disk|in "[^"]+") but not in the manifest/
 	},
 	{
-		'name' => 'missing_file',
-		'mutilate' => \&mutilate_missing_file,
-		'fails_like' =>
+		name => 'missing_file',
+		mutilate => \&mutilate_missing_file,
+		fails_like =>
 		  qr/pg_xact\/0000.*present in the manifest but not (on disk|in "[^"]+")/
 	},
 	{
-		'name' => 'missing_tablespace',
-		'mutilate' => \&mutilate_missing_tablespace,
-		'fails_like' =>
+		name => 'missing_tablespace',
+		mutilate => \&mutilate_missing_tablespace,
+		fails_like =>
 		  qr/pg_tblspc.*present in the manifest but not (on disk|in "[^"]+")/
 	},
 	{
-		'name' => 'append_to_file',
-		'mutilate' => \&mutilate_append_to_file,
-		'fails_like' =>
+		name => 'append_to_file',
+		mutilate => \&mutilate_append_to_file,
+		fails_like =>
 		  qr/has size \d+ (on disk|in "[^"]+") but size \d+ in the manifest/
 	},
 	{
-		'name' => 'truncate_file',
-		'mutilate' => \&mutilate_truncate_file,
-		'fails_like' =>
+		name => 'truncate_file',
+		mutilate => \&mutilate_truncate_file,
+		fails_like =>
 		  qr/has size 0 (on disk|in "[^"]+") but size \d+ in the manifest/
 	},
 	{
-		'name' => 'replace_file',
-		'mutilate' => \&mutilate_replace_file,
-		'fails_like' => qr/checksum mismatch for file/
+		name => 'replace_file',
+		mutilate => \&mutilate_replace_file,
+		fails_like => qr/checksum mismatch for file/
 	},
 	{
-		'name' => 'system_identifier',
-		'mutilate' => \&mutilate_system_identifier,
-		'fails_like' =>
+		name => 'system_identifier',
+		mutilate => \&mutilate_system_identifier,
+		fails_like =>
 		  qr/manifest system identifier is .*, but control file has/
 	},
 	{
-		'name' => 'bad_manifest',
-		'mutilate' => \&mutilate_bad_manifest,
-		'fails_like' => qr/manifest checksum mismatch/
+		name => 'bad_manifest',
+		mutilate => \&mutilate_bad_manifest,
+		fails_like => qr/manifest checksum mismatch/
 	},
 	{
-		'name' => 'open_file_fails',
-		'mutilate' => \&mutilate_open_file_fails,
-		'fails_like' => qr/could not open file/,
-		'needs_unix_permissions' => 1
+		name => 'open_file_fails',
+		mutilate => \&mutilate_open_file_fails,
+		fails_like => qr/could not open file/,
+		needs_unix_permissions => 1
 	},
 	{
-		'name' => 'open_directory_fails',
-		'mutilate' => \&mutilate_open_directory_fails,
-		'cleanup' => \&cleanup_open_directory_fails,
-		'fails_like' => qr/could not open directory/,
-		'needs_unix_permissions' => 1
+		name => 'open_directory_fails',
+		mutilate => \&mutilate_open_directory_fails,
+		cleanup => \&cleanup_open_directory_fails,
+		fails_like => qr/could not open directory/,
+		needs_unix_permissions => 1
 	},
 	{
-		'name' => 'search_directory_fails',
-		'mutilate' => \&mutilate_search_directory_fails,
-		'cleanup' => \&cleanup_search_directory_fails,
-		'fails_like' => qr/could not stat file or directory/,
-		'needs_unix_permissions' => 1
+		name => 'search_directory_fails',
+		mutilate => \&mutilate_search_directory_fails,
+		cleanup => \&cleanup_search_directory_fails,
+		fails_like => qr/could not stat file or directory/,
+		needs_unix_permissions => 1
 	});
 
 for my $scenario (@scenario)
 {
-	my $name = $scenario->{'name'};
+	my $name = $scenario->{name};
 
   SKIP:
 	{
 		skip "unix-style permissions not supported on Windows", 4
-		  if ($scenario->{'needs_unix_permissions'}
+		  if ($scenario->{needs_unix_permissions}
 			&& ($windows_os || $Config::Config{osname} eq 'cygwin'));
 
 		# Take a backup and check that it verifies OK.
@@ -137,23 +137,23 @@ for my $scenario (@scenario)
 			"intact backup verified");
 
 		# Mutilate the backup in some way.
-		$scenario->{'mutilate'}->($backup_path);
+		$scenario->{mutilate}->($backup_path);
 
 		# Now check that the backup no longer verifies.
 		command_fails_like(
 			[ 'pg_verifybackup', $backup_path ],
-			$scenario->{'fails_like'},
+			$scenario->{fails_like},
 			"corrupt backup fails verification: $name");
 
 		# Run cleanup hook, if provided.
-		$scenario->{'cleanup'}->($backup_path)
-		  if exists $scenario->{'cleanup'};
+		$scenario->{cleanup}->($backup_path)
+		  if exists $scenario->{cleanup};
 
 		# Turn it into a tar-format backup and see if we can still detect the
 		# same problem, unless the scenario needs UNIX permissions or we don't
 		# have a TAR program available. Note that this destructively modifies
 		# the backup directory.
-		if (   !$scenario->{'needs_unix_permissions'}
+		if (   !$scenario->{needs_unix_permissions}
 			|| !defined $tar
 			|| $tar eq '')
 		{
@@ -197,7 +197,7 @@ for my $scenario (@scenario)
 			# here, because pg_waldump can't yet read WAL from a tarfile.
 			command_fails_like(
 				[ 'pg_verifybackup', '--no-parse-wal', $tar_backup_path ],
-				$scenario->{'fails_like'},
+				$scenario->{fails_like},
 				"corrupt backup fails verification: $name");
 
 			# Use rmtree to reclaim space.
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index deed3ec247d..a98a28424fe 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -35,48 +35,48 @@ my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
 	{
-		'compression_method' => 'none',
-		'backup_flags' => [],
-		'backup_archive' => [ 'base.tar', "$tsoid.tar" ],
-		'enabled' => 1
+		compression_method => 'none',
+		backup_flags => [],
+		backup_archive => [ 'base.tar', "$tsoid.tar" ],
+		enabled => 1
 	},
 	{
-		'compression_method' => 'gzip',
-		'backup_flags' => [ '--compress', 'server-gzip' ],
-		'backup_archive' => [ 'base.tar.gz', "$tsoid.tar.gz" ],
-		'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+		compression_method => 'gzip',
+		backup_flags => [ '--compress', 'server-gzip' ],
+		backup_archive => [ 'base.tar.gz', "$tsoid.tar.gz" ],
+		enabled => check_pg_config("#define HAVE_LIBZ 1")
 	},
 	{
-		'compression_method' => 'lz4',
-		'backup_flags' => [ '--compress', 'server-lz4' ],
-		'backup_archive' => [ 'base.tar.lz4', "$tsoid.tar.lz4" ],
-		'enabled' => check_pg_config("#define USE_LZ4 1")
+		compression_method => 'lz4',
+		backup_flags => [ '--compress', 'server-lz4' ],
+		backup_archive => [ 'base.tar.lz4', "$tsoid.tar.lz4" ],
+		enabled => check_pg_config("#define USE_LZ4 1")
 	},
 	{
-		'compression_method' => 'zstd',
-		'backup_flags' => [ '--compress', 'server-zstd' ],
-		'backup_archive' => [ 'base.tar.zst', "$tsoid.tar.zst" ],
-		'enabled' => check_pg_config("#define USE_ZSTD 1")
+		compression_method => 'zstd',
+		backup_flags => [ '--compress', 'server-zstd' ],
+		backup_archive => [ 'base.tar.zst', "$tsoid.tar.zst" ],
+		enabled => check_pg_config("#define USE_ZSTD 1")
 	},
 	{
-		'compression_method' => 'zstd',
-		'backup_flags' => [ '--compress', 'server-zstd:level=1,long' ],
-		'backup_archive' => [ 'base.tar.zst', "$tsoid.tar.zst" ],
-		'enabled' => check_pg_config("#define USE_ZSTD 1")
+		compression_method => 'zstd',
+		backup_flags => [ '--compress', 'server-zstd:level=1,long' ],
+		backup_archive => [ 'base.tar.zst', "$tsoid.tar.zst" ],
+		enabled => check_pg_config("#define USE_ZSTD 1")
 	});
 
 for my $tc (@test_configuration)
 {
-	my $method = $tc->{'compression_method'};
+	my $method = $tc->{compression_method};
 
   SKIP:
 	{
 		skip "$method compression not supported by this build", 3
-		  if !$tc->{'enabled'};
+		  if !$tc->{enabled};
 		skip "no decompressor available for $method", 3
-		  if exists $tc->{'decompress_program'}
-		  && (!defined $tc->{'decompress_program'}
-			|| $tc->{'decompress_program'} eq '');
+		  if exists $tc->{decompress_program}
+		  && (!defined $tc->{decompress_program}
+			|| $tc->{decompress_program} eq '');
 
 		# Take a server-side backup.
 		$primary->command_ok(
@@ -85,7 +85,7 @@ for my $tc (@test_configuration)
 				'--checkpoint' => 'fast',
 				'--target' => "server:$backup_path",
 				'--wal-method' => 'fetch',
-				@{ $tc->{'backup_flags'} },
+				@{ $tc->{backup_flags} },
 			],
 			"server side backup, compression $method");
 
@@ -94,7 +94,7 @@ for my $tc (@test_configuration)
 		my $backup_files = join(',',
 			sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
 		my $expected_backup_files =
-		  join(',', sort ('backup_manifest', @{ $tc->{'backup_archive'} }));
+		  join(',', sort ('backup_manifest', @{ $tc->{backup_archive} }));
 		is($backup_files, $expected_backup_files,
 			"found expected backup files, compression $method");
 
diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl
index 25605291217..04d378415d3 100644
--- a/src/bin/pg_verifybackup/t/009_extract.pl
+++ b/src/bin/pg_verifybackup/t/009_extract.pl
@@ -16,42 +16,42 @@ $primary->start;
 
 my @test_configuration = (
 	{
-		'compression_method' => 'none',
-		'backup_flags' => [],
-		'enabled' => 1
+		compression_method => 'none',
+		backup_flags => [],
+		enabled => 1
 	},
 	{
-		'compression_method' => 'gzip',
-		'backup_flags' => [ '--compress', 'server-gzip:5' ],
-		'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+		compression_method => 'gzip',
+		backup_flags => [ '--compress', 'server-gzip:5' ],
+		enabled => check_pg_config("#define HAVE_LIBZ 1")
 	},
 	{
-		'compression_method' => 'lz4',
-		'backup_flags' => [ '--compress', 'server-lz4:5' ],
-		'enabled' => check_pg_config("#define USE_LZ4 1")
+		compression_method => 'lz4',
+		backup_flags => [ '--compress', 'server-lz4:5' ],
+		enabled => check_pg_config("#define USE_LZ4 1")
 	},
 	{
-		'compression_method' => 'zstd',
-		'backup_flags' => [ '--compress', 'server-zstd:5' ],
-		'enabled' => check_pg_config("#define USE_ZSTD 1")
+		compression_method => 'zstd',
+		backup_flags => [ '--compress', 'server-zstd:5' ],
+		enabled => check_pg_config("#define USE_ZSTD 1")
 	},
 	{
-		'compression_method' => 'parallel zstd',
-		'backup_flags' => [ '--compress', 'server-zstd:workers=3' ],
-		'enabled' => check_pg_config("#define USE_ZSTD 1"),
-		'possibly_unsupported' =>
+		compression_method => 'parallel zstd',
+		backup_flags => [ '--compress', 'server-zstd:workers=3' ],
+		enabled => check_pg_config("#define USE_ZSTD 1"),
+		possibly_unsupported =>
 		  qr/could not set compression worker count to 3: Unsupported parameter/
 	});
 
 for my $tc (@test_configuration)
 {
 	my $backup_path = $primary->backup_dir . '/' . 'extract_backup';
-	my $method = $tc->{'compression_method'};
+	my $method = $tc->{compression_method};
 
   SKIP:
 	{
 		skip "$method compression not supported by this build", 2
-		  if !$tc->{'enabled'};
+		  if !$tc->{enabled};
 
 		# A backup with a valid compression method should work.
 		my $backup_stdout = '';
@@ -77,8 +77,8 @@ for my $tc (@test_configuration)
 			print "# standard error was:\n$backup_stderr";
 		}
 		if (  !$backup_result
-			&& $tc->{'possibly_unsupported'}
-			&& $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+			&& $tc->{possibly_unsupported}
+			&& $backup_stderr =~ /$tc->{possibly_unsupported}/)
 		{
 			skip "compression with $method not supported by this build", 2;
 		}
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index d8d2b06c7ee..acc3dfdfe20 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -20,56 +20,56 @@ my $extract_path = $primary->backup_dir . '/extracted-backup';
 
 my @test_configuration = (
 	{
-		'compression_method' => 'none',
-		'backup_flags' => [],
-		'backup_archive' => 'base.tar',
-		'enabled' => 1
+		compression_method => 'none',
+		backup_flags => [],
+		backup_archive => 'base.tar',
+		enabled => 1
 	},
 	{
-		'compression_method' => 'gzip',
-		'backup_flags' => [ '--compress', 'client-gzip:5' ],
-		'backup_archive' => 'base.tar.gz',
-		'enabled' => check_pg_config("#define HAVE_LIBZ 1")
+		compression_method => 'gzip',
+		backup_flags => [ '--compress', 'client-gzip:5' ],
+		backup_archive => 'base.tar.gz',
+		enabled => check_pg_config("#define HAVE_LIBZ 1")
 	},
 	{
-		'compression_method' => 'lz4',
-		'backup_flags' => [ '--compress', 'client-lz4:5' ],
-		'backup_archive' => 'base.tar.lz4',
-		'enabled' => check_pg_config("#define USE_LZ4 1")
+		compression_method => 'lz4',
+		backup_flags => [ '--compress', 'client-lz4:5' ],
+		backup_archive => 'base.tar.lz4',
+		enabled => check_pg_config("#define USE_LZ4 1")
 	},
 	{
-		'compression_method' => 'zstd',
-		'backup_flags' => [ '--compress', 'client-zstd:5' ],
-		'backup_archive' => 'base.tar.zst',
-		'enabled' => check_pg_config("#define USE_ZSTD 1")
+		compression_method => 'zstd',
+		backup_flags => [ '--compress', 'client-zstd:5' ],
+		backup_archive => 'base.tar.zst',
+		enabled => check_pg_config("#define USE_ZSTD 1")
 	},
 	{
-		'compression_method' => 'zstd',
-		'backup_flags' => [ '--compress', 'client-zstd:level=1,long' ],
-		'backup_archive' => 'base.tar.zst',
-		'enabled' => check_pg_config("#define USE_ZSTD 1")
+		compression_method => 'zstd',
+		backup_flags => [ '--compress', 'client-zstd:level=1,long' ],
+		backup_archive => 'base.tar.zst',
+		enabled => check_pg_config("#define USE_ZSTD 1")
 	},
 	{
-		'compression_method' => 'parallel zstd',
-		'backup_flags' => [ '--compress', 'client-zstd:workers=3' ],
-		'backup_archive' => 'base.tar.zst',
-		'enabled' => check_pg_config("#define USE_ZSTD 1"),
-		'possibly_unsupported' =>
+		compression_method => 'parallel zstd',
+		backup_flags => [ '--compress', 'client-zstd:workers=3' ],
+		backup_archive => 'base.tar.zst',
+		enabled => check_pg_config("#define USE_ZSTD 1"),
+		possibly_unsupported =>
 		  qr/could not set compression worker count to 3: Unsupported parameter/
 	});
 
 for my $tc (@test_configuration)
 {
-	my $method = $tc->{'compression_method'};
+	my $method = $tc->{compression_method};
 
   SKIP:
 	{
 		skip "$method compression not supported by this build", 3
-		  if !$tc->{'enabled'};
+		  if !$tc->{enabled};
 		skip "no decompressor available for $method", 3
-		  if exists $tc->{'decompress_program'}
-		  && (!defined $tc->{'decompress_program'}
-			|| $tc->{'decompress_program'} eq '');
+		  if exists $tc->{decompress_program}
+		  && (!defined $tc->{decompress_program}
+			|| $tc->{decompress_program} eq '');
 
 		# Take a client-side backup.
 		my $backup_stdout = '';
@@ -81,7 +81,7 @@ for my $tc (@test_configuration)
 				'--wal-method' => 'fetch',
 				'--checkpoint' => 'fast',
 				'--format' => 'tar',
-				@{ $tc->{'backup_flags'} }
+				@{ $tc->{backup_flags} }
 			],
 			'>' => \$backup_stdout,
 			'2>' => \$backup_stderr);
@@ -94,8 +94,8 @@ for my $tc (@test_configuration)
 			print "# standard error was:\n$backup_stderr";
 		}
 		if (  !$backup_result
-			&& $tc->{'possibly_unsupported'}
-			&& $backup_stderr =~ /$tc->{'possibly_unsupported'}/)
+			&& $tc->{possibly_unsupported}
+			&& $backup_stderr =~ /$tc->{possibly_unsupported}/)
 		{
 			skip "compression with $method not supported by this build", 3;
 		}
@@ -108,7 +108,7 @@ for my $tc (@test_configuration)
 		my $backup_files = join(',',
 			sort grep { $_ ne '.' && $_ ne '..' } slurp_dir($backup_path));
 		my $expected_backup_files =
-		  join(',', sort ('backup_manifest', $tc->{'backup_archive'}));
+		  join(',', sort ('backup_manifest', $tc->{backup_archive}));
 		is($backup_files, $expected_backup_files,
 			"found expected backup files, compression $method");
 
diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl
index f22ca213c21..257174e277e 100644
--- a/src/interfaces/ecpg/preproc/parse.pl
+++ b/src/interfaces/ecpg/preproc/parse.pl
@@ -39,12 +39,12 @@ GetOptions(
 
 # Substitutions to apply to tokens whenever they are seen in a rule.
 my %replace_token = (
-	'BCONST' => 'ecpg_bconst',
-	'FCONST' => 'ecpg_fconst',
-	'Sconst' => 'ecpg_sconst',
-	'XCONST' => 'ecpg_xconst',
-	'IDENT' => 'ecpg_ident',
-	'PARAM' => 'ecpg_param',);
+	BCONST => 'ecpg_bconst',
+	FCONST => 'ecpg_fconst',
+	Sconst => 'ecpg_sconst',
+	XCONST => 'ecpg_xconst',
+	IDENT => 'ecpg_ident',
+	PARAM => 'ecpg_param',);
 
 my %replace_token_used;
 
@@ -53,24 +53,24 @@ my %replace_token_used;
 # for that nonterminal.  (In either case, ecpg.trailer had better provide
 # a substitute rule, since the default won't do.)
 my %replace_types = (
-	'PrepareStmt' => '<prep>',
-	'ExecuteStmt' => '<exec>',
-	'opt_array_bounds' => '<index>',
+	PrepareStmt => '<prep>',
+	ExecuteStmt => '<exec>',
+	opt_array_bounds => '<index>',
 
 	# "ignore" means: do not create type and rules for this nonterminal
-	'parse_toplevel' => 'ignore',
-	'stmtmulti' => 'ignore',
-	'CreateAsStmt' => 'ignore',
-	'DeallocateStmt' => 'ignore',
-	'ColId' => 'ignore',
-	'type_function_name' => 'ignore',
-	'ColLabel' => 'ignore',
-	'Sconst' => 'ignore',
-	'opt_distinct_clause' => 'ignore',
-	'PLpgSQL_Expr' => 'ignore',
-	'PLAssignStmt' => 'ignore',
-	'plassign_target' => 'ignore',
-	'plassign_equals' => 'ignore',);
+	parse_toplevel => 'ignore',
+	stmtmulti => 'ignore',
+	CreateAsStmt => 'ignore',
+	DeallocateStmt => 'ignore',
+	ColId => 'ignore',
+	type_function_name => 'ignore',
+	ColLabel => 'ignore',
+	Sconst => 'ignore',
+	opt_distinct_clause => 'ignore',
+	PLpgSQL_Expr => 'ignore',
+	PLAssignStmt => 'ignore',
+	plassign_target => 'ignore',
+	plassign_equals => 'ignore',);
 
 my %replace_types_used;
 
@@ -565,7 +565,7 @@ sub emit_rule_action
 	# Emit the addons entry's code block.
 	# We have an array to add to the buffer, we'll add it directly instead of
 	# calling add_to_buffer, which does not know about arrays.
-	push(@{ $buff{'rules'} }, @{ $rec->{lines} });
+	push(@{ $buff{rules} }, @{ $rec->{lines} });
 
 	if ($rectype eq 'addon')
 	{
@@ -686,8 +686,8 @@ sub emit_rule
 =top
 	load ecpg.addons into %addons hash.  The result is something like
 	%addons = {
-		'stmt ClosePortalStmt' => { 'type' => 'block', 'lines' => [ "{", "if (INFORMIX_MODE)" ..., "}" ], 'used' => 0 },
-		'stmt ViewStmt' => { 'type' => 'rule', 'lines' => [ "| ECPGAllocateDescr", ... ], 'used' => 0 }
+		'stmt ClosePortalStmt' => { type => 'block', lines => [ "{", "if (INFORMIX_MODE)" ..., "}" ], used => 0 },
+		'stmt ViewStmt' => { type => 'rule', lines => [ "| ECPGAllocateDescr", ... ], used => 0 }
 	}
 
 =cut
diff --git a/src/interfaces/libpq/t/001_uri.pl b/src/interfaces/libpq/t/001_uri.pl
index b0edcb3be88..bc797cc85f5 100644
--- a/src/interfaces/libpq/t/001_uri.pl
+++ b/src/interfaces/libpq/t/001_uri.pl
@@ -264,7 +264,7 @@ sub test_uri
 
 	($uri, $expect{stdout}, $expect{stderr}, %envvars) = @$_;
 
-	$expect{'exit'} = $expect{stderr} eq '';
+	$expect{exit} = $expect{stderr} eq '';
 	%ENV = (%ENV, %envvars);
 
 	my $cmd = [ 'libpq_uri_regress', $uri ];
diff --git a/src/pl/plperl/plc_perlboot.pl b/src/pl/plperl/plc_perlboot.pl
index 28a1a4cd6f2..6c1100981fa 100644
--- a/src/pl/plperl/plc_perlboot.pl
+++ b/src/pl/plperl/plc_perlboot.pl
@@ -116,12 +116,12 @@ sub ::encode_array_constructor
 	sub to_str
 	{
 		my $self = shift;
-		return ::encode_typed_literal($self->{'array'}, $self->{'typeoid'});
+		return ::encode_typed_literal($self->{array}, $self->{typeoid});
 	}
 
 	sub to_arr
 	{
-		return shift->{'array'};
+		return shift->{array};
 	}
 
 	1;
diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl
index 8269c470b59..00fa0cf4c43 100644
--- a/src/test/authentication/t/001_password.pl
+++ b/src/test/authentication/t/001_password.pl
@@ -142,7 +142,7 @@ $node->safe_psql(
 	'postgres',
 	"CREATE TABLE sysuser_data (n) AS SELECT NULL FROM generate_series(1, 10);
 	 GRANT ALL ON sysuser_data TO scram_role;");
-$ENV{"PGPASSWORD"} = 'pass';
+$ENV{PGPASSWORD} = 'pass';
 
 # Create a role that contains a comma to stress the parsing.
 $node->safe_psql('postgres',
@@ -465,10 +465,10 @@ $node->connect_fails(
 	expected_stderr => qr/server requested SCRAM-SHA-256 authentication/);
 
 # Test that bad passwords are rejected.
-$ENV{"PGPASSWORD"} = 'badpass';
+$ENV{PGPASSWORD} = 'badpass';
 test_conn($node, 'user=scram_role', 'scram-sha-256', 2,
 	log_unlike => [qr/connection authenticated:/]);
-$ENV{"PGPASSWORD"} = 'pass';
+$ENV{PGPASSWORD} = 'pass';
 
 # For "md5" method, all users should be able to connect (SCRAM
 # authentication will be performed for the user with a SCRAM secret.)
@@ -550,19 +550,19 @@ is($res, 't',
 # Tests for channel binding without SSL.
 # Using the password authentication method; channel binding can't work
 reset_pg_hba($node, 'all', 'all', 'password');
-$ENV{"PGCHANNELBINDING"} = 'require';
+$ENV{PGCHANNELBINDING} = 'require';
 test_conn($node, 'user=scram_role', 'scram-sha-256', 2);
 # SSL not in use; channel binding still can't work
 reset_pg_hba($node, 'all', 'all', 'scram-sha-256');
-$ENV{"PGCHANNELBINDING"} = 'require';
+$ENV{PGCHANNELBINDING} = 'require';
 test_conn($node, 'user=scram_role', 'scram-sha-256', 2);
 
 # Test .pgpass processing; but use a temp file, don't overwrite the real one!
 my $pgpassfile = "${PostgreSQL::Test::Utils::tmp_check}/pgpass";
 
-delete $ENV{"PGPASSWORD"};
-delete $ENV{"PGCHANNELBINDING"};
-$ENV{"PGPASSFILE"} = $pgpassfile;
+delete $ENV{PGPASSWORD};
+delete $ENV{PGCHANNELBINDING};
+$ENV{PGPASSFILE} = $pgpassfile;
 
 unlink($pgpassfile);
 append_to_file(
@@ -633,7 +633,7 @@ test_conn(
 	2, log_unlike => [qr/connection authenticated:/]);
 
 unlink($pgpassfile);
-delete $ENV{"PGPASSFILE"};
+delete $ENV{PGPASSFILE};
 
 note "Authentication tests with specific HBA policies on roles";
 
@@ -648,7 +648,7 @@ CREATE ROLE regress_member LOGIN SUPERUSER IN ROLE regress_regression_group PASS
 CREATE ROLE regress_not_member LOGIN SUPERUSER PASSWORD 'pass';});
 
 # Test role with exact matching, no members allowed.
-$ENV{"PGPASSWORD"} = 'pass';
+$ENV{PGPASSWORD} = 'pass';
 reset_pg_hba($node, 'all', 'regress_regression_group', 'scram-sha-256');
 test_conn(
 	$node,
@@ -704,7 +704,7 @@ test_conn(
 	]);
 
 # Test role membership is respected for samerole
-$ENV{"PGDATABASE"} = 'regress_regression_group';
+$ENV{PGDATABASE} = 'regress_regression_group';
 reset_pg_hba($node, 'samerole', 'all', 'scram-sha-256');
 test_conn(
 	$node,
diff --git a/src/test/authentication/t/002_saslprep.pl b/src/test/authentication/t/002_saslprep.pl
index cdf0f965252..0a38471e339 100644
--- a/src/test/authentication/t/002_saslprep.pl
+++ b/src/test/authentication/t/002_saslprep.pl
@@ -46,7 +46,7 @@ sub test_login
 	my $testname =
 	  "authentication $status_string for role $role with password $password";
 
-	$ENV{"PGPASSWORD"} = $password;
+	$ENV{PGPASSWORD} = $password;
 	if ($expected_res eq 0)
 	{
 		$node->connect_ok($connstr, $testname);
diff --git a/src/test/authentication/t/004_file_inclusion.pl b/src/test/authentication/t/004_file_inclusion.pl
index b9d3663542d..4f6a94a542c 100644
--- a/src/test/authentication/t/004_file_inclusion.pl
+++ b/src/test/authentication/t/004_file_inclusion.pl
@@ -21,7 +21,7 @@ if (!$use_unix_sockets)
 # are used to respectively track pg_hba_file_rules.rule_number and
 # pg_ident_file_mappings.map_number, which are the global counters associated
 # to each view tracking the priority of each entry processed.
-my %line_counters = ('hba_rule' => 0, 'ident_rule' => 0);
+my %line_counters = (hba_rule => 0, ident_rule => 0);
 
 # Add some data to the given HBA configuration file, generating the contents
 # expected to match pg_hba_file_rules.
@@ -61,7 +61,7 @@ sub add_hba_line
 	return '' if ($entry =~ qr/^include/);
 
 	# Increment pg_hba_file_rules.rule_number and save it.
-	$globline = ++$line_counters{'hba_rule'};
+	$globline = ++$line_counters{hba_rule};
 
 	# Generate the expected pg_hba_file_rules line
 	@tokens = split(/ /, $entry);
@@ -119,7 +119,7 @@ sub add_ident_line
 	return '' if ($entry =~ qr/^include/);
 
 	# Increment pg_ident_file_mappings.map_number and get it.
-	$globline = ++$line_counters{'ident_rule'};
+	$globline = ++$line_counters{ident_rule};
 
 	# Generate the expected pg_ident_file_mappings line
 	@tokens = split(/ /, $entry);
@@ -213,7 +213,7 @@ add_hba_line($node, $hba_file, 'local @../dbnames.conf all reject');
 $node->append_conf('dbnames.conf', "db1");
 $node->append_conf('dbnames.conf', "db3");
 $hba_expected .= "\n"
-  . $line_counters{'hba_rule'} . "|"
+  . $line_counters{hba_rule} . "|"
   . basename($hba_file) . "|"
   . $line_counters{$hba_file}
   . '|local|{db1,db3}|{all}|reject||';
diff --git a/src/test/ldap/LdapServer.pm b/src/test/ldap/LdapServer.pm
index 58619a3db0a..3782f5f3ce6 100644
--- a/src/test/ldap/LdapServer.pm
+++ b/src/test/ldap/LdapServer.pm
@@ -290,8 +290,8 @@ sub _ldapenv
 {
 	my $self = shift;
 	my %env = %ENV;
-	$env{'LDAPURI'} = $self->{url};
-	$env{'LDAPBINDDN'} = $self->{rootdn};
+	$env{LDAPURI} = $self->{url};
+	$env{LDAPBINDDN} = $self->{rootdn};
 	return %env;
 }
 
diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl
index 352b0fc1fa7..45025403b14 100644
--- a/src/test/ldap/t/001_auth.pl
+++ b/src/test/ldap/t/001_auth.pl
@@ -41,7 +41,7 @@ my ($ldap_server, $ldap_port, $ldaps_port, $ldap_url,
 ) = $ldap->prop(qw(server port s_port url s_url basedn rootdn));
 
 # don't bother to check the server's cert (though perhaps we should)
-$ENV{'LDAPTLS_REQCERT'} = "never";
+$ENV{LDAPTLS_REQCERT} = "never";
 
 note "setting up PostgreSQL instance";
 
@@ -82,7 +82,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access(
 	$node, 'test0', 2,
 	'simple bind authentication fails if user not found in LDAP',
@@ -92,7 +92,7 @@ test_access(
 	'simple bind authentication fails with wrong password',
 	log_unlike => [qr/connection authenticated:/]);
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access(
 	$node, 'test1', 0,
 	'simple bind authentication succeeds',
@@ -114,12 +114,12 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access($node, 'test0', 2,
 	'search+bind authentication fails if user not found in LDAP');
 test_access($node, 'test1', 2,
 	'search+bind authentication fails with wrong password');
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access(
 	$node, 'test1', 0,
 	'search+bind authentication succeeds',
@@ -135,12 +135,12 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access($node, 'test0', 2,
 	'search+bind authentication fails if user not found in LDAP');
 test_access($node, 'test1', 2,
 	'search+bind authentication fails with wrong password');
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'search+bind authentication succeeds');
 
 note "LDAP URLs";
@@ -151,13 +151,13 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access($node, 'test0', 2,
 	'simple bind with LDAP URL authentication fails if user not found in LDAP'
 );
 test_access($node, 'test1', 2,
 	'simple bind with LDAP URL authentication fails with wrong password');
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0,
 	'simple bind with LDAP URL authentication succeeds');
 
@@ -166,13 +166,13 @@ $node->append_conf('pg_hba.conf',
 	qq{local all all ldap ldapurl="$ldap_url/$ldap_basedn?uid?sub"});
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'wrong';
+$ENV{PGPASSWORD} = 'wrong';
 test_access($node, 'test0', 2,
 	'search+bind with LDAP URL authentication fails if user not found in LDAP'
 );
 test_access($node, 'test1', 2,
 	'search+bind with LDAP URL authentication fails with wrong password');
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0,
 	'search+bind with LDAP URL authentication succeeds');
 
@@ -184,14 +184,14 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access(
 	$node, 'test1', 0,
 	'search filter finds by uid',
 	log_like => [
 		qr/connection authenticated: identity="uid=test1,dc=example,dc=net" method=ldap/
 	],);
-$ENV{"PGPASSWORD"} = 'secret2';
+$ENV{PGPASSWORD} = 'secret2';
 test_access(
 	$node,
 	'te...@example.net',
@@ -209,9 +209,9 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'search filter finds by uid');
-$ENV{"PGPASSWORD"} = 'secret2';
+$ENV{PGPASSWORD} = 'secret2';
 test_access($node, 'te...@example.net', 0, 'search filter finds by mail');
 
 # This is not documented: You can combine ldapurl and other ldap*
@@ -223,7 +223,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'combined LDAP URL and search filter');
 
 note "diagnostic message";
@@ -235,7 +235,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 2, 'any attempt fails due to bad search pattern');
 
 note "TLS";
@@ -247,7 +247,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'StartTLS');
 
 # request LDAPS with ldapscheme=ldaps
@@ -257,7 +257,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'LDAPS');
 
 # request LDAPS with ldapurl=ldaps://...
@@ -267,7 +267,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 0, 'LDAPS with URL');
 
 # bad combination of LDAPS and StartTLS
@@ -277,7 +277,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 2, 'bad combination of LDAPS and StartTLS');
 
 done_testing();
diff --git a/src/test/ldap/t/002_bindpasswd.pl b/src/test/ldap/t/002_bindpasswd.pl
index f8beba2b279..c5384d32088 100644
--- a/src/test/ldap/t/002_bindpasswd.pl
+++ b/src/test/ldap/t/002_bindpasswd.pl
@@ -78,7 +78,7 @@ $node->append_conf('pg_hba.conf',
 );
 $node->restart;
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 test_access($node, 'test1', 2,
 	'search+bind authentication fails with wrong ldapbindpasswd');
 
diff --git a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
index 9b062e1c800..30a3018b35b 100644
--- a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
+++ b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
@@ -71,7 +71,7 @@ sub test_access
 
 note "use ldapbindpasswd";
 
-$ENV{"PGPASSWORD"} = 'secret1';
+$ENV{PGPASSWORD} = 'secret1';
 
 unlink($node->data_dir . '/pg_hba.conf');
 $node->append_conf('pg_hba.conf',
diff --git a/src/test/modules/oauth_validator/t/001_server.pl b/src/test/modules/oauth_validator/t/001_server.pl
index 6fa59fbeb25..97dbac68ef5 100644
--- a/src/test/modules/oauth_validator/t/001_server.pl
+++ b/src/test/modules/oauth_validator/t/001_server.pl
@@ -173,20 +173,20 @@ $user = "test";
 foreach my $c (@cases)
 {
 	my $connstr =
-	  "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0635 require_auth=$c->{'require_auth'}";
+	  "user=$user dbname=postgres oauth_issuer=$issuer oauth_client_id=f02c6361-0635 require_auth=$c->{require_auth}";
 
-	if (defined $c->{'failure'})
+	if (defined $c->{failure})
 	{
 		$node->connect_fails(
 			$connstr,
-			"require_auth=$c->{'require_auth'} fails",
-			expected_stderr => $c->{'failure'});
+			"require_auth=$c->{require_auth} fails",
+			expected_stderr => $c->{failure});
 	}
 	else
 	{
 		$node->connect_ok(
 			$connstr,
-			"require_auth=$c->{'require_auth'} succeeds",
+			"require_auth=$c->{require_auth} succeeds",
 			expected_stderr =>
 			  qr@Visit https://example\.com/ and enter the code: postgresuser@
 		);
diff --git a/src/test/modules/oauth_validator/t/002_client.pl b/src/test/modules/oauth_validator/t/002_client.pl
index ab83258d736..c3453674b89 100644
--- a/src/test/modules/oauth_validator/t/002_client.pl
+++ b/src/test/modules/oauth_validator/t/002_client.pl
@@ -146,9 +146,9 @@ my @cases = (
 foreach my $c (@cases)
 {
 	test(
-		"hook misbehavior: $c->{'flag'}",
-		flags => [ $c->{'flag'} ],
-		expected_stderr => $c->{'expected_error'});
+		"hook misbehavior: $c->{flag}",
+		flags => [ $c->{flag} ],
+		expected_stderr => $c->{expected_error});
 }
 
 done_testing();
diff --git a/src/test/modules/oauth_validator/t/OAuth/Server.pm b/src/test/modules/oauth_validator/t/OAuth/Server.pm
index 655b2870b0b..71586b86a62 100644
--- a/src/test/modules/oauth_validator/t/OAuth/Server.pm
+++ b/src/test/modules/oauth_validator/t/OAuth/Server.pm
@@ -74,7 +74,7 @@ sub port
 {
 	my $self = shift;
 
-	return $self->{'port'};
+	return $self->{port};
 }
 
 =pod
@@ -102,9 +102,9 @@ sub run
 	die "server did not advertise a valid port"
 	  unless Scalar::Util::looks_like_number($port);
 
-	$self->{'pid'} = $pid;
-	$self->{'port'} = $port;
-	$self->{'child'} = $read_fh;
+	$self->{pid} = $pid;
+	$self->{port} = $port;
+	$self->{child} = $read_fh;
 
 	note("OAuth provider (PID $pid) is listening on port $port\n");
 }
@@ -121,14 +121,14 @@ sub stop
 {
 	my $self = shift;
 
-	note("Sending SIGTERM to OAuth provider PID: $self->{'pid'}\n");
+	note("Sending SIGTERM to OAuth provider PID: $self->{pid}\n");
 
-	kill(15, $self->{'pid'});
-	$self->{'pid'} = undef;
+	kill(15, $self->{pid});
+	$self->{pid} = undef;
 
 	# Closing the popen() handle waits for the process to exit.
-	close($self->{'child'});
-	$self->{'child'} = undef;
+	close($self->{child});
+	$self->{child} = undef;
 }
 
 =pod
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index 3c3c6db3512..e67fec299f4 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -869,7 +869,7 @@ my %tests = (
 # Create a PG instance to test actually dumping from
 
 my $node = PostgreSQL::Test::Cluster->new('main');
-$node->init('auth_extra' => [ '--create-role' => 'regress_dump_login_role' ]);
+$node->init(auth_extra => [ '--create-role' => 'regress_dump_login_role' ]);
 $node->start;
 
 my $port = $node->port;
diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
index 81a8f44aa9f..702982dd3e6 100644
--- a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
+++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
@@ -188,7 +188,7 @@ sub adjust_database_contents
 		}
 
 		# this table had OIDs too, but we'll just drop it
-		if ($old_version >= 10 && $dbnames{'contrib_regression_postgres_fdw'})
+		if ($old_version >= 10 && $dbnames{contrib_regression_postgres_fdw})
 		{
 			_add_st(
 				$result,
diff --git a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
index 60bbd5dd445..3c2a6751f39 100644
--- a/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
+++ b/src/test/perl/PostgreSQL/Test/BackgroundPsql.pm
@@ -86,11 +86,11 @@ sub new
 	my $class = shift;
 	my ($interactive, $psql_params, $timeout, $wait) = @_;
 	my $psql = {
-		'stdin' => '',
-		'stdout' => '',
-		'stderr' => '',
-		'query_timer_restart' => undef,
-		'query_cnt' => 1,
+		stdin => '',
+		stdout => '',
+		stderr => '',
+		query_timer_restart => undef,
+		query_cnt => 1,
 	};
 	my $run;
 
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 0750915a9a8..d8690fce351 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -1019,7 +1019,7 @@ sub init_from_backup
 		PostgreSQL::Test::RecursiveCopy::copypath(
 			$backup_path,
 			$data_path,
-			'filterfn' => sub {
+			filterfn => sub {
 				my ($path) = @_;
 				if ($path =~ /^pg_tblspc\/(\d+)$/
 					&& exists $params{tablespace_map}{$1})
@@ -1936,7 +1936,7 @@ END
 		$node->teardown_node(fail_ok => 1);
 
 		# skip clean if we are requested to retain the basedir
-		next if defined $ENV{'PG_TEST_NOCLEAN'};
+		next if defined $ENV{PG_TEST_NOCLEAN};
 
 		# clean basedir on clean test invocation
 		$node->clean_node
@@ -2977,11 +2977,11 @@ sub lsn
 {
 	my ($self, $mode) = @_;
 	my %modes = (
-		'insert' => 'pg_current_wal_insert_lsn()',
-		'flush' => 'pg_current_wal_flush_lsn()',
-		'write' => 'pg_current_wal_lsn()',
-		'receive' => 'pg_last_wal_receive_lsn()',
-		'replay' => 'pg_last_wal_replay_lsn()');
+		insert => 'pg_current_wal_insert_lsn()',
+		flush => 'pg_current_wal_flush_lsn()',
+		write => 'pg_current_wal_lsn()',
+		receive => 'pg_last_wal_receive_lsn()',
+		replay => 'pg_last_wal_replay_lsn()');
 
 	$mode = '<undef>' if !defined($mode);
 	croak "unknown mode for 'lsn': '$mode', valid modes are "
@@ -3233,7 +3233,7 @@ sub wait_for_catchup
 	my ($self, $standby_name, $mode, $target_lsn) = @_;
 	$mode = defined($mode) ? $mode : 'replay';
 	my %valid_modes =
-	  ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
+	  (sent => 1, write => 1, flush => 1, replay => 1);
 	croak "unknown mode $mode for 'wait_for_catchup', valid modes are "
 	  . join(', ', keys(%valid_modes))
 	  unless exists($valid_modes{$mode});
@@ -3715,7 +3715,7 @@ sub create_logical_slot_on_standby
 
 	$handle->finish();
 
-	is($self->slot($slot_name)->{'slot_type'},
+	is($self->slot($slot_name)->{slot_type},
 		'logical', $slot_name . ' on standby created')
 	  or die "could not create slot" . $slot_name;
 }
diff --git a/src/test/perl/PostgreSQL/Test/Kerberos.pm b/src/test/perl/PostgreSQL/Test/Kerberos.pm
index b72dd2fbaf4..fa58936f75c 100644
--- a/src/test/perl/PostgreSQL/Test/Kerberos.pm
+++ b/src/test/perl/PostgreSQL/Test/Kerberos.pm
@@ -184,9 +184,9 @@ $realm = {
 	  or BAIL_OUT("could not create directory \"$kdc_datadir\"");
 
 	# Ensure that we use test's config and cache files, not global ones.
-	$ENV{'KRB5_CONFIG'} = $krb5_conf;
-	$ENV{'KRB5_KDC_PROFILE'} = $kdc_conf;
-	$ENV{'KRB5CCNAME'} = $krb5_cache;
+	$ENV{KRB5_CONFIG} = $krb5_conf;
+	$ENV{KRB5_KDC_PROFILE} = $kdc_conf;
+	$ENV{KRB5CCNAME} = $krb5_cache;
 
 	my $service_principal = "$ENV{with_krb_srvnam}/$host";
 
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index d1ad131eadf..70761506f67 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -294,7 +294,7 @@ sub tempdir
 	return File::Temp::tempdir(
 		$prefix . '_XXXX',
 		DIR => $tmp_check,
-		CLEANUP => not defined $ENV{'PG_TEST_NOCLEAN'});
+		CLEANUP => not defined $ENV{PG_TEST_NOCLEAN});
 }
 
 =pod
@@ -310,7 +310,7 @@ sub tempdir_short
 {
 
 	return File::Temp::tempdir(
-		CLEANUP => not defined $ENV{'PG_TEST_NOCLEAN'});
+		CLEANUP => not defined $ENV{PG_TEST_NOCLEAN});
 }
 
 =pod
diff --git a/src/test/postmaster/t/002_connection_limits.pl b/src/test/postmaster/t/002_connection_limits.pl
index 85f5ef03dec..325a00efd47 100644
--- a/src/test/postmaster/t/002_connection_limits.pl
+++ b/src/test/postmaster/t/002_connection_limits.pl
@@ -13,7 +13,7 @@ use Test::More;
 # Initialize the server with specific low connection limits
 my $node = PostgreSQL::Test::Cluster->new('primary');
 $node->init(
-	'auth_extra' => [
+	auth_extra => [
 		'--create-role' =>
 		  'regress_regular,regress_reserved,regress_superuser',
 	]);
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index ccd8417d449..1c860025ac8 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -378,7 +378,7 @@ sub get_slot_xmins
 	]) or die "Timed out waiting for slot xmins to advance";
 
 	my $slotinfo = $node->slot($slotname);
-	return ($slotinfo->{'xmin'}, $slotinfo->{'catalog_xmin'});
+	return ($slotinfo->{xmin}, $slotinfo->{catalog_xmin});
 }
 
 # There's no hot standby feedback and there are no logical slots on either peer
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index a5678bc4dc4..572aa877600 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -161,7 +161,7 @@ SKIP:
 	is($node_primary->psql('postgres', 'DROP DATABASE otherdb'),
 		3, 'dropping a DB with active logical slots fails');
 	$pg_recvlogical->kill_kill;
-	is($node_primary->slot('otherdb_slot')->{'slot_name'},
+	is($node_primary->slot('otherdb_slot')->{slot_name},
 		undef, 'logical slot still exists');
 }
 
@@ -171,7 +171,7 @@ $node_primary->poll_query_until('otherdb',
 
 is($node_primary->psql('postgres', 'DROP DATABASE otherdb'),
 	0, 'dropping a DB with inactive logical slots succeeds');
-is($node_primary->slot('otherdb_slot')->{'slot_name'},
+is($node_primary->slot('otherdb_slot')->{slot_name},
 	undef, 'logical slot was actually dropped with DB');
 
 # Test logical slot advancing and its durability.
diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl
index 08615f1fca8..5954b3afe22 100644
--- a/src/test/recovery/t/010_logical_decoding_timelines.pl
+++ b/src/test/recovery/t/010_logical_decoding_timelines.pl
@@ -94,7 +94,7 @@ is( $node_replica->safe_psql(
 		'postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']),
 	'',
 	'dropped DB dropme on standby');
-is($node_primary->slot('dropme_slot')->{'slot_name'},
+is($node_primary->slot('dropme_slot')->{slot_name},
 	undef, 'logical slot was actually dropped on standby');
 
 # Back to testing failover...
@@ -123,14 +123,14 @@ $node_primary->poll_query_until(
 	]) or die "slot's catalog_xmin never became set";
 
 my $phys_slot = $node_primary->slot('phys_slot');
-isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of primary');
-isnt($phys_slot->{'catalog_xmin'},
+isnt($phys_slot->{xmin}, '', 'xmin assigned on physical slot of primary');
+isnt($phys_slot->{catalog_xmin},
 	'', 'catalog_xmin assigned on physical slot of primary');
 
 # Ignore wrap-around here, we're on a new cluster:
 cmp_ok(
-	$phys_slot->{'xmin'}, '>=',
-	$phys_slot->{'catalog_xmin'},
+	$phys_slot->{xmin}, '>=',
+	$phys_slot->{catalog_xmin},
 	'xmin on physical slot must not be lower than catalog_xmin');
 
 $node_primary->safe_psql('postgres', 'CHECKPOINT');
diff --git a/src/test/recovery/t/021_row_visibility.pl b/src/test/recovery/t/021_row_visibility.pl
index 42740745bfd..2fd3aa6e20d 100644
--- a/src/test/recovery/t/021_row_visibility.pl
+++ b/src/test/recovery/t/021_row_visibility.pl
@@ -48,7 +48,7 @@ $psql_primary{run} = IPC::Run::start(
 	'2>' => \$psql_primary{stderr},
 	$psql_timeout);
 
-my %psql_standby = ('stdin' => '', 'stdout' => '', 'stderr' => '');
+my %psql_standby = (stdin => '', stdout => '', stderr => '');
 $psql_standby{run} = IPC::Run::start(
 	[
 		'psql', '--no-psqlrc', '--no-align',
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index 492ef115ba4..0ed966906a4 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -46,7 +46,7 @@ $psql_primary{run} = IPC::Run::start(
 	'2>' => \$psql_primary{stderr},
 	$psql_timeout);
 
-my %psql_standby = ('stdin' => '', 'stdout' => '', 'stderr' => '');
+my %psql_standby = (stdin => '', stdout => '', stderr => '');
 $psql_standby{run} = IPC::Run::start(
 	[
 		'psql', '--no-psqlrc', '--no-align',
diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl
index c31cab06f1c..d34ed576e1f 100644
--- a/src/test/recovery/t/035_standby_logical_decoding.pl
+++ b/src/test/recovery/t/035_standby_logical_decoding.pl
@@ -122,9 +122,9 @@ sub check_slots_dropped
 {
 	my ($slot_prefix, $slot_user_handle) = @_;
 
-	is($node_standby->slot($slot_prefix . 'inactiveslot')->{'slot_type'},
+	is($node_standby->slot($slot_prefix . 'inactiveslot')->{slot_type},
 		'', 'inactiveslot on standby dropped');
-	is($node_standby->slot($slot_prefix . 'activeslot')->{'slot_type'},
+	is($node_standby->slot($slot_prefix . 'activeslot')->{slot_type},
 		'', 'activeslot on standby dropped');
 
 	check_pg_recvlogical_stderr($slot_user_handle, "conflict with recovery");
@@ -328,9 +328,9 @@ $node_subscriber->init;
 $node_subscriber->start;
 
 my %psql_subscriber = (
-	'subscriber_stdin' => '',
-	'subscriber_stdout' => '',
-	'subscriber_stderr' => '');
+	subscriber_stdin => '',
+	subscriber_stdout => '',
+	subscriber_stderr => '');
 $psql_subscriber{run} = IPC::Run::start(
 	[
 		'psql', '--no-psqlrc', '--no-align',
@@ -886,7 +886,7 @@ is( $node_standby->safe_psql(
 
 check_slots_dropped('drop_db', $handle);
 
-is($node_standby->slot('otherslot')->{'slot_type'},
+is($node_standby->slot('otherslot')->{slot_type},
 	'logical', 'otherslot on standby not dropped');
 
 # Cleanup : manually drop the slot that was not dropped.
diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl
index fffc51f4047..9e4947f4e3c 100644
--- a/src/test/ssl/t/002_scram.pl
+++ b/src/test/ssl/t/002_scram.pl
@@ -71,8 +71,8 @@ my $md5_works = ($node->psql('postgres', "select md5('')") == 0);
 $ssl_server->configure_test_server_for_ssl(
 	$node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
 	"scram-sha-256",
-	'password' => "pass",
-	'password_enc' => "scram-sha-256");
+	password => "pass",
+	password_enc => "scram-sha-256");
 switch_server_cert($node, certfile => 'server-cn-only');
 $ENV{PGPASSWORD} = "pass";
 $common_connstr =
diff --git a/src/test/ssl/t/003_sslinfo.pl b/src/test/ssl/t/003_sslinfo.pl
index b9eae8d641b..63a455d1fc3 100644
--- a/src/test/ssl/t/003_sslinfo.pl
+++ b/src/test/ssl/t/003_sslinfo.pl
@@ -186,9 +186,9 @@ foreach my $c (@cases)
 	$result = $node->safe_psql(
 		"trustdb",
 		"SELECT ssl_client_cert_present();",
-		connstr => "$common_connstr dbname=trustdb $c->{'opts'}");
-	is($result, $c->{'present'},
-		"ssl_client_cert_present() for $c->{'opts'}");
+		connstr => "$common_connstr dbname=trustdb $c->{opts}");
+	is($result, $c->{present},
+		"ssl_client_cert_present() for $c->{opts}");
 }
 
 done_testing();
diff --git a/src/test/subscription/t/027_nosuperuser.pl b/src/test/subscription/t/027_nosuperuser.pl
index 36af1c16e7f..b33677ff3dc 100644
--- a/src/test/subscription/t/027_nosuperuser.pl
+++ b/src/test/subscription/t/027_nosuperuser.pl
@@ -374,8 +374,8 @@ SKIP:
 	$node_subscriber1->wait_for_subscription_sync($node_publisher1,
 		'regress_test_sub');
 
-	my $save_pgpassword = $ENV{"PGPASSWORD"};
-	$ENV{"PGPASSWORD"} = 'secret';
+	my $save_pgpassword = $ENV{PGPASSWORD};
+	$ENV{PGPASSWORD} = 'secret';
 
 	# Setup pg_hba configuration so that logical replication connection without
 	# password is not allowed.
@@ -404,7 +404,7 @@ SKIP:
 		'subscription whose owner is a non-superuser must specify password parameter of the connection string'
 	);
 
-	$ENV{"PGPASSWORD"} = $save_pgpassword;
+	$ENV{PGPASSWORD} = $save_pgpassword;
 
 	# It should succeed after including the password parameter of the connection
 	# string.
diff --git a/src/tools/win32tzlist.pl b/src/tools/win32tzlist.pl
index 706b1f78f80..ef8a84b694c 100755
--- a/src/tools/win32tzlist.pl
+++ b/src/tools/win32tzlist.pl
@@ -59,9 +59,9 @@ foreach my $keyname (@subkeys)
 	  unless ($vals{Std} && $vals{Dlt} && $vals{Display});
 	push @system_zones,
 	  {
-		'std' => $vals{Std}->[2],
-		'dlt' => $vals{Dlt}->[2],
-		'display' => clean_displayname($vals{Display}->[2]),
+		std => $vals{Std}->[2],
+		dlt => $vals{Dlt}->[2],
+		display => clean_displayname($vals{Display}->[2]),
 	  };
 }
 
@@ -90,10 +90,10 @@ while ($pgtz =~
 {
 	push @file_zones,
 	  {
-		'display' => clean_displayname($1),
-		'std' => $2,
-		'dlt' => $3,
-		'match' => $4,
+		display => clean_displayname($1),
+		std => $2,
+		dlt => $3,
+		match => $4,
 	  };
 }
 
-- 
2.43.0

Reply via email to