From c768f399c556295de7d53895410e686d86b4b960 Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Sun, 9 Nov 2025 19:34:58 +0700
Subject: [PATCH 01/10] Converting one huge transaction into series of one per
 'scale'

---
 src/bin/pgbench/pgbench.c | 61 ++++++++++++++++++++++++++-------------
 1 file changed, 41 insertions(+), 20 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index d8764ba6fe0..284a7c860f1 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -181,6 +181,12 @@ static int64 end_time = 0;		/* when to stop in micro seconds, under -T */
  */
 static int	scale = 1;
 
+/*
+ * scaling factor after which we switch to multiple transactions during
+ * data population phase on server side
+ */
+static int64	single_txn_scale_limit = 1;
+
 /*
  * fillfactor. for example, fillfactor = 90 will use only 90 percent
  * space during inserts and leave 10 percent free.
@@ -5213,6 +5219,7 @@ static void
 initGenerateDataServerSide(PGconn *con)
 {
 	PQExpBufferData sql;
+	int				chunk = (scale >= single_txn_scale_limit) ? 1 : scale;
 
 	fprintf(stderr, "generating data (server-side)...\n");
 
@@ -5225,30 +5232,44 @@ initGenerateDataServerSide(PGconn *con)
 	/* truncate away any old data */
 	initTruncateTables(con);
 
+	executeStatement(con, "commit");
+
 	initPQExpBuffer(&sql);
 
-	printfPQExpBuffer(&sql,
-					  "insert into pgbench_branches(bid,bbalance) "
-					  "select bid, 0 "
-					  "from generate_series(1, %d) as bid", nbranches * scale);
-	executeStatement(con, sql.data);
-
-	printfPQExpBuffer(&sql,
-					  "insert into pgbench_tellers(tid,bid,tbalance) "
-					  "select tid, (tid - 1) / %d + 1, 0 "
-					  "from generate_series(1, %d) as tid", ntellers, ntellers * scale);
-	executeStatement(con, sql.data);
-
-	printfPQExpBuffer(&sql,
-					  "insert into pgbench_accounts(aid,bid,abalance,filler) "
-					  "select aid, (aid - 1) / %d + 1, 0, '' "
-					  "from generate_series(1, " INT64_FORMAT ") as aid",
-					  naccounts, (int64) naccounts * scale);
-	executeStatement(con, sql.data);
+	for (int i = 0; i < scale; i += chunk) {
+		executeStatement(con, "begin");
+
+		printfPQExpBuffer(&sql,
+						  "insert into pgbench_branches(bid,bbalance) "
+						  "select bid + 1, 0 "
+						  "from generate_series(%d, %d) as bid", i, i + chunk);
+						  //"select bid, 0 "
+						  //"from generate_series(1, %d) as bid", nbranches * scale);
+		executeStatement(con, sql.data);
+
+		printfPQExpBuffer(&sql,
+						  "insert into pgbench_tellers(tid,bid,tbalance) "
+						  "select tid + 1, tid / %d + 1, 0 "
+						  "from generate_series(%d, %d) as tid",
+						  ntellers, i * ntellers, (i + chunk) * ntellers - 1);
+						  //"select tid, (tid - 1) / %d + 1, 0 "
+						  //"from generate_series(1, %d) as tid", ntellers, ntellers * scale);
+		executeStatement(con, sql.data);
+
+		printfPQExpBuffer(&sql,
+						  "insert into pgbench_accounts(aid,bid,abalance,filler) "
+						  "select aid + 1, aid / %d + 1, 0, '' "
+						  "from generate_series(" INT64_FORMAT ", " INT64_FORMAT ") as aid",
+						  naccounts, (int64) i * naccounts, (int64) (i + chunk) * naccounts - 1);
+						  //"select aid, (aid - 1) / %d + 1, 0, '' "
+						  //"from generate_series(1, " INT64_FORMAT ") as aid",
+						  //naccounts, (int64) naccounts * scale);
+		executeStatement(con, sql.data);
+
+		executeStatement(con, "commit");
+	}
 
 	termPQExpBuffer(&sql);
-
-	executeStatement(con, "commit");
 }
 
 /*
-- 
2.43.0


From 0eddb156c187d829c4381bc928c5314705928852 Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Sun, 9 Nov 2025 20:13:23 +0700
Subject: [PATCH 02/10] Getting rid of the limit for single transaction size
 during data generation

---
 src/bin/pgbench/pgbench.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 284a7c860f1..28b72e4cf1f 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -181,12 +181,6 @@ static int64 end_time = 0;		/* when to stop in micro seconds, under -T */
  */
 static int	scale = 1;
 
-/*
- * scaling factor after which we switch to multiple transactions during
- * data population phase on server side
- */
-static int64	single_txn_scale_limit = 1;
-
 /*
  * fillfactor. for example, fillfactor = 90 will use only 90 percent
  * space during inserts and leave 10 percent free.
@@ -5219,7 +5213,6 @@ static void
 initGenerateDataServerSide(PGconn *con)
 {
 	PQExpBufferData sql;
-	int				chunk = (scale >= single_txn_scale_limit) ? 1 : scale;
 
 	fprintf(stderr, "generating data (server-side)...\n");
 
@@ -5236,13 +5229,13 @@ initGenerateDataServerSide(PGconn *con)
 
 	initPQExpBuffer(&sql);
 
-	for (int i = 0; i < scale; i += chunk) {
+	for (int i = 0; i < scale; i++) {
 		executeStatement(con, "begin");
 
 		printfPQExpBuffer(&sql,
 						  "insert into pgbench_branches(bid,bbalance) "
 						  "select bid + 1, 0 "
-						  "from generate_series(%d, %d) as bid", i, i + chunk);
+						  "from generate_series(%d, %d) as bid", i, i + 1);
 						  //"select bid, 0 "
 						  //"from generate_series(1, %d) as bid", nbranches * scale);
 		executeStatement(con, sql.data);
@@ -5251,7 +5244,7 @@ initGenerateDataServerSide(PGconn *con)
 						  "insert into pgbench_tellers(tid,bid,tbalance) "
 						  "select tid + 1, tid / %d + 1, 0 "
 						  "from generate_series(%d, %d) as tid",
-						  ntellers, i * ntellers, (i + chunk) * ntellers - 1);
+						  ntellers, i * ntellers, (i + 1) * ntellers - 1);
 						  //"select tid, (tid - 1) / %d + 1, 0 "
 						  //"from generate_series(1, %d) as tid", ntellers, ntellers * scale);
 		executeStatement(con, sql.data);
@@ -5260,7 +5253,7 @@ initGenerateDataServerSide(PGconn *con)
 						  "insert into pgbench_accounts(aid,bid,abalance,filler) "
 						  "select aid + 1, aid / %d + 1, 0, '' "
 						  "from generate_series(" INT64_FORMAT ", " INT64_FORMAT ") as aid",
-						  naccounts, (int64) i * naccounts, (int64) (i + chunk) * naccounts - 1);
+						  naccounts, (int64) i * naccounts, (int64) (i + 1) * naccounts - 1);
 						  //"select aid, (aid - 1) / %d + 1, 0, '' "
 						  //"from generate_series(1, " INT64_FORMAT ") as aid",
 						  //naccounts, (int64) naccounts * scale);
-- 
2.43.0


From c5659cf474ec273c057668f30a4f435fd02f2da7 Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Sun, 9 Nov 2025 20:38:36 +0700
Subject: [PATCH 03/10] No need to keep old code in comments

---
 src/bin/pgbench/pgbench.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 28b72e4cf1f..97895aa9edf 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -5236,8 +5236,6 @@ initGenerateDataServerSide(PGconn *con)
 						  "insert into pgbench_branches(bid,bbalance) "
 						  "select bid + 1, 0 "
 						  "from generate_series(%d, %d) as bid", i, i + 1);
-						  //"select bid, 0 "
-						  //"from generate_series(1, %d) as bid", nbranches * scale);
 		executeStatement(con, sql.data);
 
 		printfPQExpBuffer(&sql,
@@ -5245,8 +5243,6 @@ initGenerateDataServerSide(PGconn *con)
 						  "select tid + 1, tid / %d + 1, 0 "
 						  "from generate_series(%d, %d) as tid",
 						  ntellers, i * ntellers, (i + 1) * ntellers - 1);
-						  //"select tid, (tid - 1) / %d + 1, 0 "
-						  //"from generate_series(1, %d) as tid", ntellers, ntellers * scale);
 		executeStatement(con, sql.data);
 
 		printfPQExpBuffer(&sql,
@@ -5254,9 +5250,6 @@ initGenerateDataServerSide(PGconn *con)
 						  "select aid + 1, aid / %d + 1, 0, '' "
 						  "from generate_series(" INT64_FORMAT ", " INT64_FORMAT ") as aid",
 						  naccounts, (int64) i * naccounts, (int64) (i + 1) * naccounts - 1);
-						  //"select aid, (aid - 1) / %d + 1, 0, '' "
-						  //"from generate_series(1, " INT64_FORMAT ") as aid",
-						  //naccounts, (int64) naccounts * scale);
 		executeStatement(con, sql.data);
 
 		executeStatement(con, "commit");
-- 
2.43.0


From e47b52ddf23593dad9375ef5356fd41d0621ede3 Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Mon, 10 Nov 2025 19:06:48 +0700
Subject: [PATCH 04/10] Adding server-side data generation via unnest

---
 src/bin/pgbench/pgbench.c | 199 ++++++++++++++++++++++++++++++++++----
 1 file changed, 182 insertions(+), 17 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 97895aa9edf..65d77cdefea 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -161,7 +161,7 @@ typedef struct socket_set
  * some configurable parameters */
 
 #define DEFAULT_INIT_STEPS "dtgvp"	/* default -I setting */
-#define ALL_INIT_STEPS "dtgGvpf"	/* all possible steps */
+#define ALL_INIT_STEPS "dtgGiIvpf"	/* all possible steps */
 
 #define LOG_STEP_SECONDS	5	/* seconds between log messages */
 #define DEFAULT_NXACTS	10		/* default nxacts */
@@ -171,6 +171,12 @@ typedef struct socket_set
 #define MIN_ZIPFIAN_PARAM		1.001	/* minimum parameter for zipfian */
 #define MAX_ZIPFIAN_PARAM		1000.0	/* maximum parameter for zipfian */
 
+/* original single transaction server-side method */
+#define GEN_TYPE_INSERT_ORIGINAL	'G'	/* use INSERT .. SELECT generate_series to generate data */
+/* 'one transaction per scale' server-side methods */
+#define GEN_TYPE_INSERT_SERIES		'i'	/* use INSERT .. SELECT generate_series to generate data */
+#define GEN_TYPE_INSERT_UNNEST  	'I'	/* use INSERT .. SELECT unnest to generate data */
+
 static int	nxacts = 0;			/* number of transactions per client */
 static int	duration = 0;		/* duration in seconds */
 static int64 end_time = 0;		/* when to stop in micro seconds, under -T */
@@ -181,6 +187,11 @@ static int64 end_time = 0;		/* when to stop in micro seconds, under -T */
  */
 static int	scale = 1;
 
+/*
+ *
+ */
+static char	data_generation_type = '?';
+
 /*
  * fillfactor. for example, fillfactor = 90 will use only 90 percent
  * space during inserts and leave 10 percent free.
@@ -914,7 +925,9 @@ usage(void)
 		   "                           d: drop any existing pgbench tables\n"
 		   "                           t: create the tables used by the standard pgbench scenario\n"
 		   "                           g: generate data, client-side\n"
-		   "                           G: generate data, server-side\n"
+		   "                           G: generate data, server-side in single transaction\n"
+		   "                           i:   server-side (multiple TXNs) INSERT .. SELECT generate_series\n"
+		   "                           I:   server-side (multiple TXNs) INSERT .. SELECT unnest\n"
 		   "                           v: invoke VACUUM on the standard tables\n"
 		   "                           p: create primary key indexes on the standard tables\n"
 		   "                           f: create foreign keys between the standard tables\n"
@@ -5203,18 +5216,16 @@ initGenerateDataClientSide(PGconn *con)
 }
 
 /*
- * Fill the standard tables with some data generated on the server
- *
- * As already the case with the client-side data generation, the filler
- * column defaults to NULL in pgbench_branches and pgbench_tellers,
- * and is a blank-padded string in pgbench_accounts.
+ * Generating data via INSERT .. SELECT .. FROM generate_series
+ * whole dataset in single transaction
  */
 static void
-initGenerateDataServerSide(PGconn *con)
+generateDataInsertSingleTXN(PGconn *con)
 {
 	PQExpBufferData sql;
 
-	fprintf(stderr, "generating data (server-side)...\n");
+	fprintf(stderr, "via INSERT .. SELECT generate_series... in single TXN\n");
+
 
 	/*
 	 * we do all of this in one transaction to enable the backend's
@@ -5225,31 +5236,136 @@ initGenerateDataServerSide(PGconn *con)
 	/* truncate away any old data */
 	initTruncateTables(con);
 
+	initPQExpBuffer(&sql);
+
+	printfPQExpBuffer(&sql,
+					  "insert into pgbench_branches(bid, bbalance) "
+					  "select bid, 0 "
+					  "from generate_series(1, %d)", scale * nbranches);
+	executeStatement(con, sql.data);
+
+	printfPQExpBuffer(&sql,
+					  "insert into pgbench_tellers(tid, bid, tbalance) "
+					  "select tid + 1, tid / %d + 1, 0 "
+					  "from generate_series(0, %d) as tid",
+					  ntellers, (scale * ntellers) - 1);
+	executeStatement(con, sql.data);
+
+	printfPQExpBuffer(&sql,
+					  "insert into pgbench_accounts(aid, bid, abalance, "
+								   "filler) "
+					  "select aid + 1, aid / %d + 1, 0, '' "
+					  "from generate_series(0, " INT64_FORMAT ") as aid",
+					  naccounts, (int64) (scale * naccounts) - 1);
+	executeStatement(con, sql.data);
+
 	executeStatement(con, "commit");
 
+	termPQExpBuffer(&sql);
+}
+
+
+/*
+ * Generating data via INSERT .. SELECT .. FROM generate_series
+ * One transaction per 'scale'
+ */
+static void
+generateDataInsertSeries(PGconn *con)
+{
+	PQExpBufferData sql;
+
+	fprintf(stderr, "via INSERT .. SELECT generate_series... in multiple TXN(s)\n");
+
 	initPQExpBuffer(&sql);
 
-	for (int i = 0; i < scale; i++) {
+	executeStatement(con, "begin");
+
+	/* truncate away any old data */
+	initTruncateTables(con);
+
+	executeStatement(con, "commit");
+
+	for (int i = 0; i < scale; i++)
+	{
 		executeStatement(con, "begin");
 
 		printfPQExpBuffer(&sql,
-						  "insert into pgbench_branches(bid,bbalance) "
-						  "select bid + 1, 0 "
-						  "from generate_series(%d, %d) as bid", i, i + 1);
+						  "insert into pgbench_branches(bid, bbalance) "
+						  "values(%d, 0)", i + 1);
 		executeStatement(con, sql.data);
 
 		printfPQExpBuffer(&sql,
-						  "insert into pgbench_tellers(tid,bid,tbalance) "
+						  "insert into pgbench_tellers(tid, bid, tbalance) "
 						  "select tid + 1, tid / %d + 1, 0 "
 						  "from generate_series(%d, %d) as tid",
 						  ntellers, i * ntellers, (i + 1) * ntellers - 1);
 		executeStatement(con, sql.data);
 
 		printfPQExpBuffer(&sql,
-						  "insert into pgbench_accounts(aid,bid,abalance,filler) "
+						  "insert into pgbench_accounts(aid, bid, abalance, "
+									   "filler) "
 						  "select aid + 1, aid / %d + 1, 0, '' "
-						  "from generate_series(" INT64_FORMAT ", " INT64_FORMAT ") as aid",
-						  naccounts, (int64) i * naccounts, (int64) (i + 1) * naccounts - 1);
+						  "from generate_series(" INT64_FORMAT ", "
+								INT64_FORMAT ") as aid",
+						  naccounts, (int64) i * naccounts,
+						  (int64) (i + 1) * naccounts - 1);
+		executeStatement(con, sql.data);
+
+		executeStatement(con, "commit");
+	}
+
+	termPQExpBuffer(&sql);
+}
+
+/*
+ * Generating data via INSERT .. SELECT .. FROM unnest
+ * One transaction per 'scale'
+ */
+static void
+generateDataInsertUnnest(PGconn *con)
+{
+	PQExpBufferData sql;
+
+	fprintf(stderr, "via INSERT .. SELECT unnest...\n");
+
+	initPQExpBuffer(&sql);
+
+	executeStatement(con, "begin");
+
+	/* truncate away any old data */
+	initTruncateTables(con);
+
+	executeStatement(con, "commit");
+
+	for (int s = 0; s < scale; s++)
+	{
+		executeStatement(con, "begin");
+
+		printfPQExpBuffer(&sql,
+						  "insert into pgbench_branches(bid,bbalance) "
+						  "values(%d, 0)", s + 1);
+		executeStatement(con, sql.data);
+
+		printfPQExpBuffer(&sql,
+						  "insert into pgbench_tellers(tid, bid, tbalance) "
+						  "select unnest(array_agg(s.i order by s.i)) as tid, "
+								  "%d as bid, 0 as tbalance "
+						  "from generate_series(%d, %d) as s(i)",
+						  s + 1, s * ntellers + 1, (s + 1) * ntellers);
+		executeStatement(con, sql.data);
+
+		printfPQExpBuffer(&sql,
+						  "with data as ("
+						  "   select generate_series(" INT64_FORMAT ", "
+							  INT64_FORMAT ") as i) "
+						  "insert into pgbench_accounts(aid, bid, "
+									  "abalance, filler) "
+						  "select unnest(aid), unnest(bid), 0 as abalance, "
+								  "'' as filler "
+						  "from (select array_agg(i+1) aid, "
+									   "array_agg(i/%d + 1) bid from data)",
+						  (int64) s * naccounts + 1,
+						  (int64) (s + 1) * naccounts, naccounts);
 		executeStatement(con, sql.data);
 
 		executeStatement(con, "commit");
@@ -5258,6 +5374,32 @@ initGenerateDataServerSide(PGconn *con)
 	termPQExpBuffer(&sql);
 }
 
+/*
+ * Fill the standard tables with some data generated on the server
+ *
+ * As already the case with the client-side data generation, the filler
+ * column defaults to NULL in pgbench_branches and pgbench_tellers,
+ * and is a blank-padded string in pgbench_accounts.
+ */
+static void
+initGenerateDataServerSide(PGconn *con)
+{
+	fprintf(stderr, "generating data (server-side) ");
+
+	switch (data_generation_type)
+	{
+		case GEN_TYPE_INSERT_ORIGINAL:
+			generateDataInsertSingleTXN(con);
+			break;
+		case GEN_TYPE_INSERT_SERIES:
+			generateDataInsertSeries(con);
+			break;
+		case GEN_TYPE_INSERT_UNNEST:
+			generateDataInsertUnnest(con);
+			break;
+	}
+}
+
 /*
  * Invoke vacuum on the standard tables
  */
@@ -5341,6 +5483,8 @@ initCreateFKeys(PGconn *con)
 static void
 checkInitSteps(const char *initialize_steps)
 {
+	char	data_init_type = 0;
+
 	if (initialize_steps[0] == '\0')
 		pg_fatal("no initialization steps specified");
 
@@ -5352,7 +5496,26 @@ checkInitSteps(const char *initialize_steps)
 			pg_log_error_detail("Allowed step characters are: \"" ALL_INIT_STEPS "\".");
 			exit(1);
 		}
+
+		switch (*step)
+		{
+			case 'G':
+				data_init_type++;
+				data_generation_type = *step;
+				break;
+			case 'i':
+				data_init_type++;
+				data_generation_type = *step;
+				break;
+			case 'I':
+				data_init_type++;
+				data_generation_type = *step;
+				break;
+		}
 	}
+
+	if (data_init_type > 1)
+		pg_log_error("WARNING! More than one type of server-side data generation is requested");
 }
 
 /*
@@ -5395,6 +5558,8 @@ runInitSteps(const char *initialize_steps)
 				initGenerateDataClientSide(con);
 				break;
 			case 'G':
+			case 'i':
+			case 'I':
 				op = "server-side generate";
 				initGenerateDataServerSide(con);
 				break;
-- 
2.43.0


From 5e1827b889b283f50299ce6ab1a73f9f55a4a84f Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Mon, 10 Nov 2025 20:00:56 +0700
Subject: [PATCH 05/10] Fixing typo in query

---
 src/bin/pgbench/pgbench.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 65d77cdefea..03e37df4434 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -5241,7 +5241,8 @@ generateDataInsertSingleTXN(PGconn *con)
 	printfPQExpBuffer(&sql,
 					  "insert into pgbench_branches(bid, bbalance) "
 					  "select bid, 0 "
-					  "from generate_series(1, %d)", scale * nbranches);
+					  "from generate_series(1, %d) as bid",
+					  scale * nbranches);
 	executeStatement(con, sql.data);
 
 	printfPQExpBuffer(&sql,
-- 
2.43.0


From 7ca86521fda6929b8e0de3fc77dcbb8984009c88 Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Tue, 11 Nov 2025 19:39:45 +0700
Subject: [PATCH 06/10] Adding support for COPY BINARY mode

---
 src/bin/pgbench/pgbench.c | 393 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 381 insertions(+), 12 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 03e37df4434..71aa1d9479f 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -161,7 +161,7 @@ typedef struct socket_set
  * some configurable parameters */
 
 #define DEFAULT_INIT_STEPS "dtgvp"	/* default -I setting */
-#define ALL_INIT_STEPS "dtgGiIvpf"	/* all possible steps */
+#define ALL_INIT_STEPS "dtgCGiIvpf"	/* all possible steps */
 
 #define LOG_STEP_SECONDS	5	/* seconds between log messages */
 #define DEFAULT_NXACTS	10		/* default nxacts */
@@ -176,6 +176,8 @@ typedef struct socket_set
 /* 'one transaction per scale' server-side methods */
 #define GEN_TYPE_INSERT_SERIES		'i'	/* use INSERT .. SELECT generate_series to generate data */
 #define GEN_TYPE_INSERT_UNNEST  	'I'	/* use INSERT .. SELECT unnest to generate data */
+#define GEN_TYPE_COPY_ORIGINAL		'g' /* use COPY .. FROM STDIN .. TEXT to generate data */
+#define GEN_TYPE_COPY_BINARY		'C' /* use COPY .. FROM STDIN .. BINARY to generate data */
 
 static int	nxacts = 0;			/* number of transactions per client */
 static int	duration = 0;		/* duration in seconds */
@@ -188,10 +190,17 @@ static int64 end_time = 0;		/* when to stop in micro seconds, under -T */
 static int	scale = 1;
 
 /*
- *
+ * mode of data generation to use
  */
 static char	data_generation_type = '?';
 
+/*
+ * COPY FROM BINARY execution buffer
+ */
+#define BIN_COPY_BUF_SIZE	102400				/* maximum buffer size for COPY FROM BINARY */
+static char		*bin_copy_buffer = NULL;		/* buffer for COPY FROM BINARY */
+static int32_t	 bin_copy_buffer_length = 0;	/* current buffer size */
+
 /*
  * fillfactor. for example, fillfactor = 90 will use only 90 percent
  * space during inserts and leave 10 percent free.
@@ -861,7 +870,8 @@ static int	wait_on_socket_set(socket_set *sa, int64 usecs);
 static bool socket_has_input(socket_set *sa, int fd, int idx);
 
 /* callback used to build rows for COPY during data loading */
-typedef void (*initRowMethod) (PQExpBufferData *sql, int64 curr);
+typedef void (*initRowMethod)		(PQExpBufferData *sql, int64 curr);
+typedef void (*initRowMethodBin)	(PGconn *con, PGresult *res, int64_t curr, int32_t parent);
 
 /* callback functions for our flex lexer */
 static const PsqlScanCallbacks pgbench_callbacks = {
@@ -925,6 +935,7 @@ usage(void)
 		   "                           d: drop any existing pgbench tables\n"
 		   "                           t: create the tables used by the standard pgbench scenario\n"
 		   "                           g: generate data, client-side\n"
+		   "                           C:   client-side (single TXN) COPY .. FROM STDIN .. BINARY\n"
 		   "                           G: generate data, server-side in single transaction\n"
 		   "                           i:   server-side (multiple TXNs) INSERT .. SELECT generate_series\n"
 		   "                           I:   server-side (multiple TXNs) INSERT .. SELECT unnest\n"
@@ -5191,9 +5202,9 @@ initPopulateTable(PGconn *con, const char *table, int64 base,
  * a blank-padded string in pgbench_accounts.
  */
 static void
-initGenerateDataClientSide(PGconn *con)
+initGenerateDataClientSideText(PGconn *con)
 {
-	fprintf(stderr, "generating data (client-side)...\n");
+	fprintf(stderr, "TEXT mode...\n");
 
 	/*
 	 * we do all of this in one transaction to enable the backend's
@@ -5209,12 +5220,373 @@ initGenerateDataClientSide(PGconn *con)
 	 * already exist
 	 */
 	initPopulateTable(con, "pgbench_branches", nbranches, initBranch);
-	initPopulateTable(con, "pgbench_tellers", ntellers, initTeller);
+	initPopulateTable(con, "pgbench_tellers",  ntellers,  initTeller);
 	initPopulateTable(con, "pgbench_accounts", naccounts, initAccount);
 
 	executeStatement(con, "commit");
 }
 
+
+/*
+ * Dumps binary buffer to file (purely for debugging)
+ */
+static void
+dumpBufferToFile(char *filename)
+{
+	FILE *file_ptr;
+	size_t bytes_written;
+
+	file_ptr = fopen(filename, "wb");
+	if (file_ptr == NULL)
+	{
+		fprintf(stderr, "Error opening file %s\n", filename);
+		return; // EXIT_FAILURE;
+	}
+
+	bytes_written = fwrite(bin_copy_buffer, 1, bin_copy_buffer_length, file_ptr);
+
+	if (bytes_written != bin_copy_buffer_length)
+	{
+		fprintf(stderr, "Error writing to file or incomplete write\n");
+		fclose(file_ptr);
+		return; // EXIT_FAILURE;
+	}
+
+	fclose(file_ptr);
+}
+
+/*
+ * Save char data to buffer
+ */
+static void
+bufferCharData(char *src, int32_t len)
+{
+	memcpy((char *) bin_copy_buffer + bin_copy_buffer_length, (char *) src, len);
+	bin_copy_buffer_length += len;
+}
+
+/*
+ * Converts platform byte order into network byte order
+ * SPARC doesn't require that
+ */
+static void
+bufferData(void *src, int32_t len)
+{
+#ifdef __sparc__
+	bufferCharData(src, len);
+#else
+	if (len == 1)
+		bufferCharData(src, len);
+	else
+		for (int32_t i = 0; i < len; i++)
+		{
+			((char *) bin_copy_buffer + bin_copy_buffer_length)[i] =
+				((char *) src)[len - i - 1];
+		}
+
+	bin_copy_buffer_length += len;
+#endif
+}
+
+/*
+ * adds column counter
+ */
+static void
+addColumnCounter(int16_t n)
+{
+	bufferData((void *) &n, sizeof(n));
+}
+
+/*
+ * adds column with NULL value
+ */
+static void
+addNullColumn()
+{
+	int32_t null = -1;
+	bufferData((void *) &null, sizeof(null));
+}
+
+/*
+ * adds column with int8 value
+ */
+static void
+addInt8Column(int8_t value)
+{
+	int8_t	data = value;
+	int32_t	size = sizeof(data);
+	bufferData((void *) &size, sizeof(size));
+	bufferData((void *) &data, sizeof(data));
+}
+
+/*
+ * adds column with int16 value
+ */
+static void
+addInt16Column(int16_t value)
+{
+	int16_t	data = value;
+	int32_t	size = sizeof(data);
+	bufferData((void *) &size, sizeof(size));
+	bufferData((void *) &data, sizeof(data));
+}
+
+/*
+ * adds column with int32 value
+ */
+static void
+addInt32Column(int32_t value)
+{
+	int32_t	data = value;
+	int32_t	size = sizeof(data);
+	bufferData((void *) &size, sizeof(size));
+	bufferData((void *) &data, sizeof(data));
+}
+
+/*
+ * adds column with int64 value
+ */
+static void
+addInt64Column(int64_t value)
+{
+	int64_t	data = value;
+	int32_t	size = sizeof(data);
+	bufferData((void *) &size, sizeof(size));
+	bufferData((void *) &data, sizeof(data));
+}
+
+/*
+ * adds column with char value
+ */
+static void
+addCharColumn(char *value)
+{
+	int32_t	size = strlen(value);
+	bufferData((void *) &size, sizeof(size));
+	bufferCharData(value, size);
+}
+
+/*
+ * Starts communication with server for COPY FROM BINARY statement
+ */
+static void
+sendBinaryCopyHeader(PGconn *con)
+{
+	char header[] = {'P','G','C','O','P','Y','\n','\377','\r','\n','\0',
+					 '\0','\0','\0','\0',
+					 '\0','\0','\0','\0' };
+
+	PQputCopyData(con, header, 19);
+}
+
+/*
+ * Finishes communication with server for COPY FROM BINARY statement
+ */
+static void
+sendBinaryCopyTrailer(PGconn *con)
+{
+	static char trailer[] = { 0xFF, 0xFF };
+
+	PQputCopyData(con, trailer, 2);
+}
+
+/*
+ * Flashes current buffer over network if needed
+ */
+static void
+flushBuffer(PGconn *con, PGresult *res, int16_t row_len)
+{
+	if (bin_copy_buffer_length + row_len > BIN_COPY_BUF_SIZE)
+	{
+		/* flush current buffer */
+		if (PQresultStatus(res) == PGRES_COPY_IN)
+			PQputCopyData(con, (char *) bin_copy_buffer, bin_copy_buffer_length);
+		bin_copy_buffer_length = 0;
+	}
+}
+
+/*
+ * Sends current branch row to buffer
+ */
+static void
+initBranchBinary(PGconn *con, PGresult *res, int64_t curr, int32_t parent)
+{
+	/*
+	 * Each row has following extra bytes:
+	 * - 2 bytes for number of columns
+	 * - 4 bytes as length for each column
+	 */
+	int16_t	max_row_len =  35 + 2 + 4*3; /* max row size is 32 */
+
+	flushBuffer(con, res, max_row_len);
+
+	addColumnCounter(2);
+
+	addInt32Column(curr + 1);
+	addInt32Column(0);
+}
+
+/*
+ * Sends current teller row to buffer
+ */
+static void
+initTellerBinary(PGconn *con, PGresult *res, int64_t curr, int32_t parent)
+{
+	/*
+	 * Each row has following extra bytes:
+	 * - 2 bytes for number of columns
+	 * - 4 bytes as length for each column
+	 */
+	int16_t	max_row_len =  40 + 2 + 4*4; /* max row size is 40 */
+
+	flushBuffer(con, res, max_row_len);
+
+	addColumnCounter(3);
+
+	addInt32Column(curr + 1);
+	addInt32Column(curr / parent + 1);
+	addInt32Column(0);
+}
+
+/*
+ * Sends current account row to buffer
+ */
+static void
+initAccountBinary(PGconn *con, PGresult *res, int64_t curr, int32_t parent)
+{
+	/*
+	 * Each row has following extra bytes:
+	 * - 2 bytes for number of columns
+	 * - 4 bytes as length for each column
+	 */
+	int16_t	max_row_len = 250 + 2 + 4*4; /* max row size is 250 for int64 */
+
+	flushBuffer(con, res, max_row_len);
+
+	addColumnCounter(3);
+
+	if (scale <= SCALE_32BIT_THRESHOLD)
+		addInt32Column(curr + 1);
+	else
+		addInt64Column(curr);
+
+	addInt32Column(curr / parent + 1);
+	addInt32Column(0);
+}
+
+/*
+ * Universal wrapper for sending data in binary format
+ */
+static void
+initPopulateTableBinary(PGconn *con, char *table, char *columns,
+						int64_t base, initRowMethodBin init_row)
+{
+	int			 n;
+	PGresult	*res;
+	char		 copy_statement[256];
+	const char	*copy_statement_fmt = "copy %s (%s) from stdin (format binary)";
+	int64_t		 total = base * scale;
+
+	bin_copy_buffer_length = 0;
+
+	/* Use COPY with FREEZE on v14 and later for all ordinary tables */
+	if ((PQserverVersion(con) >= 140000) &&
+		get_table_relkind(con, table) == RELKIND_RELATION)
+		copy_statement_fmt = "copy %s (%s) from stdin with (format binary, freeze on)";
+
+	n = pg_snprintf(copy_statement, sizeof(copy_statement), copy_statement_fmt, table, columns);
+	if (n >= sizeof(copy_statement))
+		pg_fatal("invalid buffer size: must be at least %d characters long", n);
+	else if (n == -1)
+		pg_fatal("invalid format string");
+
+	res = PQexec(con, copy_statement);
+
+	if (PQresultStatus(res) != PGRES_COPY_IN)
+		pg_fatal("unexpected copy in result: %s", PQerrorMessage(con));
+	PQclear(res);
+
+
+	sendBinaryCopyHeader(con);
+
+	for (int64_t i = 0; i < total; i++)
+	{
+		init_row(con, res, i, base);
+	}
+
+	if (PQresultStatus(res) == PGRES_COPY_IN)
+		PQputCopyData(con, (char *) bin_copy_buffer, bin_copy_buffer_length);
+	else
+		fprintf(stderr, "Unexpected mode %d instead of %d\n", PQresultStatus(res), PGRES_COPY_IN);
+
+	sendBinaryCopyTrailer(con);
+
+	if (PQresultStatus(res) == PGRES_COPY_IN)
+	{
+		if (PQputCopyEnd(con, NULL) == 1) /* success */
+		{
+			res = PQgetResult(con);
+			if (PQresultStatus(res) != PGRES_COMMAND_OK)
+				fprintf(stderr, "Error: %s\n", PQerrorMessage(con));
+			PQclear(res);
+		}
+		else
+			fprintf(stderr, "Error: %s\n", PQerrorMessage(con));
+	}
+}
+
+/*
+ * Wrapper for binary data load
+ */
+static void
+initGenerateDataClientSideBinary(PGconn *con)
+{
+
+	fprintf(stderr, "BINARY mode...\n");
+
+	bin_copy_buffer = pg_malloc(BIN_COPY_BUF_SIZE);
+	bin_copy_buffer_length = 0;
+
+	/*
+	 * we do all of this in one transaction to enable the backend's
+	 * data-loading optimizations
+	 */
+	executeStatement(con, "begin");
+
+	/* truncate away any old data */
+	initTruncateTables(con);
+
+	initPopulateTableBinary(con, "pgbench_branches", "bid, bbalance",
+							nbranches, initBranchBinary);
+	initPopulateTableBinary(con, "pgbench_tellers",  "tid, bid, tbalance",
+							ntellers,  initTellerBinary);
+	initPopulateTableBinary(con, "pgbench_accounts", "aid, bid, abalance",
+							naccounts, initAccountBinary);
+
+	executeStatement(con, "commit");
+
+	pg_free(bin_copy_buffer);
+}
+
+/*
+ * Fill the standard tables with some data generated and sent from the client.
+ */
+static void
+initGenerateDataClientSide(PGconn *con)
+{
+	fprintf(stderr, "generating data (client-side) in ");
+
+	switch (data_generation_type)
+	{
+		case GEN_TYPE_COPY_ORIGINAL:
+			initGenerateDataClientSideText(con);
+			break;
+		case GEN_TYPE_COPY_BINARY:
+			initGenerateDataClientSideBinary(con);
+			break;
+	}
+}
+
 /*
  * Generating data via INSERT .. SELECT .. FROM generate_series
  * whole dataset in single transaction
@@ -5500,14 +5872,10 @@ checkInitSteps(const char *initialize_steps)
 
 		switch (*step)
 		{
+			case 'g':
+			case 'C':
 			case 'G':
-				data_init_type++;
-				data_generation_type = *step;
-				break;
 			case 'i':
-				data_init_type++;
-				data_generation_type = *step;
-				break;
 			case 'I':
 				data_init_type++;
 				data_generation_type = *step;
@@ -5555,6 +5923,7 @@ runInitSteps(const char *initialize_steps)
 				initCreateTables(con);
 				break;
 			case 'g':
+			case 'C':
 				op = "client-side generate";
 				initGenerateDataClientSide(con);
 				break;
-- 
2.43.0


From 4aa0ac05765edf6b5f0c13e18ac677287ce78206 Mon Sep 17 00:00:00 2001
From: Fujii Masao <fujii@postgresql.org>
Date: Fri, 14 Nov 2025 22:40:39 +0900
Subject: [PATCH 07/10] pgbench: Fix assertion failure with multiple
 \syncpipeline in pipeline mode.

Previously, when pgbench ran a custom script that triggered retriable errors
(e.g., deadlocks) followed by multiple \syncpipeline commands in pipeline mode,
the following assertion failure could occur:

    Assertion failed: (res == ((void*)0)), function discardUntilSync, file pgbench.c, line 3594.

The issue was that discardUntilSync() assumed a pipeline sync result
(PGRES_PIPELINE_SYNC) would always be followed by either another sync result
or NULL. This assumption was incorrect: when multiple sync requests were sent,
a sync result could instead be followed by another result type. In such cases,
discardUntilSync() mishandled the results, leading to the assertion failure.

This commit fixes the issue by making discardUntilSync() correctly handle cases
where a pipeline sync result is followed by other result types. It now continues
discarding results until another pipeline sync followed by NULL is reached.

Backpatched to v17, where support for \syncpipeline command in pgbench was
introduced.

Author: Yugo Nagata <nagata@sraoss.co.jp>
Reviewed-by: Chao Li <lic@highgo.com>
Reviewed-by: Fujii Masao <masao.fujii@gmail.com>
Discussion: https://postgr.es/m/20251111105037.f3fc554616bc19891f926c5b@sraoss.co.jp
Backpatch-through: 17
---
 src/bin/pgbench/pgbench.c | 39 ++++++++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 11 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index d8764ba6fe0..a425176ecdc 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -3563,14 +3563,18 @@ doRetry(CState *st, pg_time_usec_t *now)
 }
 
 /*
- * Read results and discard it until a sync point.
+ * Read and discard results until the last sync point.
  */
 static int
 discardUntilSync(CState *st)
 {
 	bool		received_sync = false;
 
-	/* send a sync */
+	/*
+	 * Send a Sync message to ensure at least one PGRES_PIPELINE_SYNC is
+	 * received and to avoid an infinite loop, since all earlier ones may have
+	 * already been received.
+	 */
 	if (!PQpipelineSync(st->con))
 	{
 		pg_log_error("client %d aborted: failed to send a pipeline sync",
@@ -3578,29 +3582,42 @@ discardUntilSync(CState *st)
 		return 0;
 	}
 
-	/* receive PGRES_PIPELINE_SYNC and null following it */
+	/*
+	 * Continue reading results until the last sync point, i.e., until
+	 * reaching null just after PGRES_PIPELINE_SYNC.
+	 */
 	for (;;)
 	{
 		PGresult   *res = PQgetResult(st->con);
 
+		if (PQstatus(st->con) == CONNECTION_BAD)
+		{
+			pg_log_error("client %d aborted while rolling back the transaction after an error; perhaps the backend died while processing",
+						 st->id);
+			PQclear(res);
+			return 0;
+		}
+
 		if (PQresultStatus(res) == PGRES_PIPELINE_SYNC)
 			received_sync = true;
-		else if (received_sync)
+		else if (received_sync && res == NULL)
 		{
-			/*
-			 * PGRES_PIPELINE_SYNC must be followed by another
-			 * PGRES_PIPELINE_SYNC or NULL; otherwise, assert failure.
-			 */
-			Assert(res == NULL);
-
 			/*
 			 * Reset ongoing sync count to 0 since all PGRES_PIPELINE_SYNC
 			 * results have been discarded.
 			 */
 			st->num_syncs = 0;
-			PQclear(res);
 			break;
 		}
+		else
+		{
+			/*
+			 * If a PGRES_PIPELINE_SYNC is followed by something other than
+			 * PGRES_PIPELINE_SYNC or NULL, another PGRES_PIPELINE_SYNC will
+			 * appear later. Reset received_sync to false to wait for it.
+			 */
+			received_sync = false;
+		}
 		PQclear(res);
 	}
 
-- 
2.43.0


From 9c4f19055597e9adb25e65c2aa8bedf20a09e13d Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Fri, 21 Nov 2025 19:05:58 +0700
Subject: [PATCH 08/10] Setting empty string as default value in filler column

---
 src/bin/pgbench/pgbench.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 967f6ce6984..03b5e5c28f0 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -4985,26 +4985,26 @@ initCreateTables(PGconn *con)
 	static const struct ddlinfo DDLs[] = {
 		{
 			"pgbench_history",
-			"tid int,bid int,aid    int,delta int,mtime timestamp,filler char(22)",
-			"tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)",
+			"tid int,bid int,aid    int,delta int,mtime timestamp,filler char(22) default ''",
+			"tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22) default ''",
 			0
 		},
 		{
 			"pgbench_tellers",
-			"tid int not null,bid int,tbalance int,filler char(84)",
-			"tid int not null,bid int,tbalance int,filler char(84)",
+			"tid int not null,bid int,tbalance int,filler char(84) default ''",
+			"tid int not null,bid int,tbalance int,filler char(84) default ''",
 			1
 		},
 		{
 			"pgbench_accounts",
-			"aid    int not null,bid int,abalance int,filler char(84)",
-			"aid bigint not null,bid int,abalance int,filler char(84)",
+			"aid    int not null,bid int,abalance int,filler char(84) default ''",
+			"aid bigint not null,bid int,abalance int,filler char(84) default ''",
 			1
 		},
 		{
 			"pgbench_branches",
-			"bid int not null,bbalance int,filler char(88)",
-			"bid int not null,bbalance int,filler char(88)",
+			"bid int not null,bbalance int,filler char(88) default ''",
+			"bid int not null,bbalance int,filler char(88) default ''",
 			1
 		}
 	};
-- 
2.43.0


From dcb85d26f8132eaaf9d096e814b9bda49db7d478 Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Fri, 21 Nov 2025 20:06:24 +0700
Subject: [PATCH 09/10] Switching COPY FROM BINARY to run in multiple
 transactions

---
 src/bin/pgbench/pgbench.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 03b5e5c28f0..6b89007a63b 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -5496,20 +5496,20 @@ initAccountBinary(PGconn *con, PGresult *res, int64_t curr, int32_t parent)
  */
 static void
 initPopulateTableBinary(PGconn *con, char *table, char *columns,
-						int64_t base, initRowMethodBin init_row)
+						int counter, int64_t base, initRowMethodBin init_row)
 {
 	int			 n;
 	PGresult	*res;
 	char		 copy_statement[256];
 	const char	*copy_statement_fmt = "copy %s (%s) from stdin (format binary)";
-	int64_t		 total = base * scale;
+	int64_t		 start = base * counter;
 
 	bin_copy_buffer_length = 0;
 
 	/* Use COPY with FREEZE on v14 and later for all ordinary tables */
 	if ((PQserverVersion(con) >= 140000) &&
 		get_table_relkind(con, table) == RELKIND_RELATION)
-		copy_statement_fmt = "copy %s (%s) from stdin with (format binary, freeze on)";
+		copy_statement_fmt = "copy %s (%s) from stdin with (format binary)";
 
 	n = pg_snprintf(copy_statement, sizeof(copy_statement), copy_statement_fmt, table, columns);
 	if (n >= sizeof(copy_statement))
@@ -5526,7 +5526,7 @@ initPopulateTableBinary(PGconn *con, char *table, char *columns,
 
 	sendBinaryCopyHeader(con);
 
-	for (int64_t i = 0; i < total; i++)
+	for (int64_t i = start; i < start + base; i++)
 	{
 		init_row(con, res, i, base);
 	}
@@ -5573,15 +5573,20 @@ initGenerateDataClientSideBinary(PGconn *con)
 	/* truncate away any old data */
 	initTruncateTables(con);
 
-	initPopulateTableBinary(con, "pgbench_branches", "bid, bbalance",
-							nbranches, initBranchBinary);
-	initPopulateTableBinary(con, "pgbench_tellers",  "tid, bid, tbalance",
-							ntellers,  initTellerBinary);
-	initPopulateTableBinary(con, "pgbench_accounts", "aid, bid, abalance",
-							naccounts, initAccountBinary);
-
 	executeStatement(con, "commit");
 
+	for (int i = 0; i < scale; i++)
+	{
+		initPopulateTableBinary(con, "pgbench_branches", "bid, bbalance",
+								i, nbranches, initBranchBinary);
+		initPopulateTableBinary(con, "pgbench_tellers",  "tid, bid, tbalance",
+								i, ntellers,  initTellerBinary);
+		initPopulateTableBinary(con, "pgbench_accounts", "aid, bid, abalance",
+								i, naccounts, initAccountBinary);
+
+		executeStatement(con, "commit");
+	}
+
 	pg_free(bin_copy_buffer);
 }
 
-- 
2.43.0


From b8e28881225234fd00b55235bc60fad2dc60b544 Mon Sep 17 00:00:00 2001
From: Boris Mironov <boris.mironov@gmail.com>
Date: Sat, 22 Nov 2025 17:06:00 +0700
Subject: [PATCH 10/10] Adding tests for new modes of data generation

---
 src/bin/pgbench/pgbench.c                    | 21 ++++----
 src/bin/pgbench/t/001_pgbench_with_server.pl | 52 +++++++++++++++++---
 2 files changed, 56 insertions(+), 17 deletions(-)

diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 6b89007a63b..dd4e5d5e056 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -164,7 +164,7 @@ typedef struct socket_set
 #define ALL_INIT_STEPS "dtgCGiIvpf"	/* all possible steps */
 
 #define LOG_STEP_SECONDS	5	/* seconds between log messages */
-#define DEFAULT_NXACTS	10		/* default nxacts */
+#define DEFAULT_NXACTS		10	/* default nxacts */
 
 #define MIN_GAUSSIAN_PARAM		2.0 /* minimum parameter for gauss */
 
@@ -192,7 +192,7 @@ static int	scale = 1;
 /*
  * mode of data generation to use
  */
-static char	data_generation_type = '?';
+static char	data_generation_type = GEN_TYPE_COPY_ORIGINAL;
 
 /*
  * COPY FROM BINARY execution buffer
@@ -4985,26 +4985,26 @@ initCreateTables(PGconn *con)
 	static const struct ddlinfo DDLs[] = {
 		{
 			"pgbench_history",
-			"tid int,bid int,aid    int,delta int,mtime timestamp,filler char(22) default ''",
-			"tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22) default ''",
+			"tid int,bid int,aid    int,delta int,mtime timestamp,filler char(22) default '?'",
+			"tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22) default '?'",
 			0
 		},
 		{
 			"pgbench_tellers",
-			"tid int not null,bid int,tbalance int,filler char(84) default ''",
-			"tid int not null,bid int,tbalance int,filler char(84) default ''",
+			"tid int not null,bid int,tbalance int,filler char(84)",
+			"tid int not null,bid int,tbalance int,filler char(84)",
 			1
 		},
 		{
 			"pgbench_accounts",
-			"aid    int not null,bid int,abalance int,filler char(84) default ''",
-			"aid bigint not null,bid int,abalance int,filler char(84) default ''",
+			"aid    int not null,bid int,abalance int,filler char(84) default '?'",
+			"aid bigint not null,bid int,abalance int,filler char(84) default '?'",
 			1
 		},
 		{
 			"pgbench_branches",
-			"bid int not null,bbalance int,filler char(88) default ''",
-			"bid int not null,bbalance int,filler char(88) default ''",
+			"bid int not null,bbalance int,filler char(88)",
+			"bid int not null,bbalance int,filler char(88)",
 			1
 		}
 	};
@@ -7837,6 +7837,7 @@ main(int argc, char **argv)
 			}
 		}
 
+		checkInitSteps(initialize_steps);
 		runInitSteps(initialize_steps);
 		exit(0);
 	}
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 581e9af7907..a377048ead1 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -16,25 +16,30 @@ sub check_data_state
 	local $Test::Builder::Level = $Test::Builder::Level + 1;
 	my $node = shift;
 	my $type = shift;
+	my $sql_result;
 
-	my $sql_result = $node->safe_psql('postgres',
-		'SELECT count(*) AS null_count FROM pgbench_accounts WHERE filler IS NULL LIMIT 10;'
-	);
-	is($sql_result, '0',
-		"$type: filler column of pgbench_accounts has no NULL data");
 	$sql_result = $node->safe_psql('postgres',
 		'SELECT count(*) AS null_count FROM pgbench_branches WHERE filler IS NULL;'
 	);
 	is($sql_result, '1',
 		"$type: filler column of pgbench_branches has only NULL data");
+
 	$sql_result = $node->safe_psql('postgres',
 		'SELECT count(*) AS null_count FROM pgbench_tellers WHERE filler IS NULL;'
 	);
 	is($sql_result, '10',
 		"$type: filler column of pgbench_tellers has only NULL data");
+
+	$sql_result = $node->safe_psql('postgres',
+		'SELECT count(*) AS null_count FROM pgbench_accounts WHERE filler IS NULL LIMIT 10;'
+	);
+	is($sql_result, '0',
+		"$type: filler column of pgbench_accounts has no NULL data");
+
 	$sql_result = $node->safe_psql('postgres',
 		'SELECT count(*) AS data_count FROM pgbench_history;');
-	is($sql_result, '0', "$type: pgbench_history has no data");
+	is($sql_result, '0',
+		"$type: pgbench_history has no data");
 }
 
 # start a pgbench specific server
@@ -125,7 +130,7 @@ $node->pgbench(
 	'pgbench scale 1 initialization',);
 
 # Check data state, after client-side data generation.
-check_data_state($node, 'client-side');
+check_data_state($node, 'client-side (default options)');
 
 # Again, with all possible options
 $node->pgbench(
@@ -143,6 +148,7 @@ $node->pgbench(
 		qr{done in \d+\.\d\d s }
 	],
 	'pgbench scale 1 initialization');
+check_data_state($node, 'client-side (all options)');
 
 # Test interaction of --init-steps with legacy step-selection options
 $node->pgbench(
@@ -164,6 +170,38 @@ $node->pgbench(
 # Check data state, after server-side data generation.
 check_data_state($node, 'server-side');
 
+# Test server-side generation with UNNEST
+$node->pgbench(
+	'--initialize --init-steps=dtI',
+	0,
+	[qr{^$}],
+	[
+		qr{dropping old tables},
+		qr{creating tables},
+		qr{generating data \(server-side\)},
+		qr{done in \d+\.\d\d s }
+	],
+	'pgbench --init-steps server-side UNNEST');
+
+# Check data state, after server-side data generation.
+check_data_state($node, 'server-side (unnest)');
+
+# Test client-side generation with COPY BINARY
+$node->pgbench(
+	'--initialize --init-steps=dtC',
+	0,
+	[qr{^$}],
+	[
+		qr{dropping old tables},
+		qr{creating tables},
+		qr{generating data \(client-side\)},
+		qr{done in \d+\.\d\d s }
+	],
+	'pgbench --init-steps client-side BINARY');
+
+# Check data state, after client-side binary data generation.
+check_data_state($node, 'client-side (binary)');
+
 # Run all builtin scripts, for a few transactions each
 $node->pgbench(
 	'--transactions=5 -Dfoo=bla --client=2 --protocol=simple --builtin=t'
-- 
2.43.0

