From 76ee9f2037080a58e53ad2b7fd5cf9d0f86e15e9 Mon Sep 17 00:00:00 2001
From: David Rowley <dgrowley@gmail.com>
Date: Fri, 7 Nov 2025 18:03:09 +1300
Subject: [PATCH v11 2/2] fixup! v10 parallel tid range scan

---
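Notes for reviewers (below the cut line, so not part of the commit message):

The behavioural core of this fixup is that parallel chunk sizing is now derived
from the number of blocks actually being scanned (scan_nblocks) rather than from
the whole relation, so a TID range covering only part of the table gets
proportionally smaller chunks.  The standalone sketch below is only an
illustration of that arithmetic, not the in-tree code: the constant values are
assumed to match tableam.c, next_power_of_2() stands in for pg_nextpower2_32(),
and chunk_size() is a made-up helper.

/* chunk_size_demo.c -- illustrative only, not part of this patch */
#include <stdio.h>
#include <stdint.h>

#define PARALLEL_SEQSCAN_NCHUNKS		2048	/* assumed, as in tableam.c */
#define PARALLEL_SEQSCAN_MAX_CHUNK_SIZE	8192	/* assumed, as in tableam.c */

/* stand-in for pg_nextpower2_32(): smallest power of 2 >= v, for v >= 1 */
static uint32_t
next_power_of_2(uint32_t v)
{
	uint32_t	p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* mirrors the chunk-size math in table_block_parallelscan_startblock_init() */
static uint32_t
chunk_size(uint32_t scan_nblocks)
{
	uint32_t	target = scan_nblocks / PARALLEL_SEQSCAN_NCHUNKS;
	uint32_t	size = next_power_of_2(target > 1 ? target : 1);

	return size < PARALLEL_SEQSCAN_MAX_CHUNK_SIZE ?
		size : PARALLEL_SEQSCAN_MAX_CHUNK_SIZE;
}

int
main(void)
{
	printf("%u\n", chunk_size(100000));		/* whole table: 64 */
	printf("%u\n", chunk_size(3000));		/* narrow TID range: 1 */
	printf("%u\n", chunk_size(100000000));	/* huge table: capped at 8192 */
	return 0;
}

Compiling and running it prints 64, 1 and 8192, showing how a narrow TID range
drops straight to single-block chunks while very large scans stay capped.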
 src/backend/access/heap/heapam.c           |  14 +--
 src/backend/access/table/tableam.c         | 134 ++++++++++++---------
 src/backend/executor/execParallel.c        |   6 +-
 src/backend/executor/nodeTidrangescan.c    |  52 ++------
 src/backend/optimizer/path/costsize.c      |   6 +-
 src/backend/optimizer/path/tidpath.c       |   6 +-
 src/include/access/tableam.h               |   8 +-
 src/test/regress/expected/tidrangescan.out |  29 ++---
 src/test/regress/sql/tidrangescan.sql      |  31 +++--
 9 files changed, 136 insertions(+), 150 deletions(-)
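A second sketch models the simplified end-of-scan test in
table_block_parallelscan_nextpage() after this fixup: the worker compares its
allocation counter against a single scan_nblocks value (phs_numblock when a
limit was set, phs_nblocks otherwise), while the returned page still wraps
modulo phs_nblocks.  Chunk allocation, atomics and the ramp-down are
deliberately left out, and ModelScan/model_nextpage are invented names for the
example.

/* nextpage_demo.c -- single-process illustration, not the tableam.c code */
#include <stdio.h>
#include <stdint.h>

#define INVALID_BLOCK	UINT32_MAX

typedef struct ModelScan
{
	uint32_t	phs_nblocks;	/* blocks in the relation */
	uint32_t	phs_startblock; /* block the scan starts at */
	uint32_t	phs_numblock;	/* block limit, or INVALID_BLOCK */
	uint64_t	nallocated;		/* pages handed out so far */
} ModelScan;

static uint32_t
model_nextpage(ModelScan *scan)
{
	uint64_t	nallocated = scan->nallocated++;
	uint32_t	scan_nblocks;

	/* scan the whole relation, or just phs_numblock blocks if limited */
	scan_nblocks = scan->phs_numblock == INVALID_BLOCK ?
		scan->phs_nblocks : scan->phs_numblock;

	if (nallocated >= scan_nblocks)
		return INVALID_BLOCK;	/* ran out of blocks to scan */

	/* wraparound is still relative to the relation size */
	return (uint32_t) ((nallocated + scan->phs_startblock) % scan->phs_nblocks);
}

int
main(void)
{
	/* 40-block table, TID range starting at block 10 and 20 blocks long */
	ModelScan	scan = {40, 10, 20, 0};
	uint32_t	page;

	while ((page = model_nextpage(&scan)) != INVALID_BLOCK)
		printf("%u ", page);	/* prints 10 .. 29 */
	printf("\n");
	return 0;
}

With a 40-block table, startblock 10 and a 20-block limit, it hands out blocks
10 through 29 and then reports that the scan is exhausted.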

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f1693e79c31..1ad442c1b2c 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -258,7 +258,9 @@ heap_scan_stream_read_next_parallel(ReadStream *stream,
 		/* parallel scan */
 		table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
 												 scan->rs_parallelworkerdata,
-												 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
+												 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel,
+												 scan->rs_startblock,
+												 scan->rs_numblocks);
 
 		/* may return InvalidBlockNumber if there are no more blocks */
 		scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
@@ -490,16 +492,6 @@ heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlk
 
 	scan->rs_startblock = startBlk;
 	scan->rs_numblocks = numBlks;
-
-	/* set the limits in the ParallelBlockTableScanDesc, when present as leader */
-	if (scan->rs_base.rs_parallel != NULL && !IsParallelWorker())
-	{
-		ParallelBlockTableScanDesc bpscan;
-
-		bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
-		bpscan->phs_startblock = startBlk;
-		bpscan->phs_numblock = numBlks;
-	}
 }
 
 /*
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index baef7459b6b..9c3347ba12b 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -189,8 +189,8 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
 }
 
 TableScanDesc
-table_beginscan_parallel_tidrange(Relation relation, ParallelTableScanDesc pscan,
-		ItemPointerData * mintid, ItemPointerData * maxtid)
+table_beginscan_parallel_tidrange(Relation relation,
+								  ParallelTableScanDesc pscan)
 {
 	Snapshot	snapshot;
 	uint32		flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE;
@@ -216,11 +216,6 @@ table_beginscan_parallel_tidrange(Relation relation, ParallelTableScanDesc pscan
 
 	sscan = relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
 											 pscan, flags);
-
-	/* Set the TID range if needed */
-	if (mintid && maxtid)
-		relation->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid);
-
 	return sscan;
 }
 
@@ -453,14 +448,22 @@ table_block_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
  *
  * Determine where the parallel seq scan should start.  This function may be
  * called many times, once by each parallel worker.  We must be careful only
- * to set the startblock once.
+ * to set the phs_startblock and phs_numblock fields once.
+ *
+ * Callers may optionally specify a non-InvalidBlockNumber value for
+ * 'startblock' to force the scan to start at the given page.  Likewise,
+ * 'numblocks' can be set to a non-InvalidBlockNumber value to limit the
+ * scan to that many blocks.
  */
 void
 table_block_parallelscan_startblock_init(Relation rel,
 										 ParallelBlockTableScanWorker pbscanwork,
-										 ParallelBlockTableScanDesc pbscan)
+										 ParallelBlockTableScanDesc pbscan,
+										 BlockNumber startblock,
+										 BlockNumber numblocks)
 {
 	BlockNumber sync_startpage = InvalidBlockNumber;
+	BlockNumber scan_nblocks;
 
 	/* Reset the state we use for controlling allocation size. */
 	memset(pbscanwork, 0, sizeof(*pbscanwork));
@@ -468,42 +471,36 @@ table_block_parallelscan_startblock_init(Relation rel,
 	StaticAssertStmt(MaxBlockNumber <= 0xFFFFFFFE,
 					 "pg_nextpower2_32 may be too small for non-standard BlockNumber width");
 
-	/*
-	 * We determine the chunk size based on the size of the relation. First we
-	 * split the relation into PARALLEL_SEQSCAN_NCHUNKS chunks but we then
-	 * take the next highest power of 2 number of the chunk size.  This means
-	 * we split the relation into somewhere between PARALLEL_SEQSCAN_NCHUNKS
-	 * and PARALLEL_SEQSCAN_NCHUNKS / 2 chunks.
-	 */
-	pbscanwork->phsw_chunk_size = pg_nextpower2_32(Max(pbscan->phs_nblocks /
-													   PARALLEL_SEQSCAN_NCHUNKS, 1));
-
-	/*
-	 * Ensure we don't go over the maximum chunk size with larger tables. This
-	 * means we may get much more than PARALLEL_SEQSCAN_NCHUNKS for larger
-	 * tables.  Too large a chunk size has been shown to be detrimental to
-	 * synchronous scan performance.
-	 */
-	pbscanwork->phsw_chunk_size = Min(pbscanwork->phsw_chunk_size,
-									  PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);
-
 retry:
 	/* Grab the spinlock. */
 	SpinLockAcquire(&pbscan->phs_mutex);
 
 	/*
-	 * If the scan's startblock has not yet been initialized, we must do so
-	 * now.  If this is not a synchronized scan, we just start at block 0, but
-	 * if it is a synchronized scan, we must get the starting position from
-	 * the synchronized scan machinery.  We can't hold the spinlock while
-	 * doing that, though, so release the spinlock, get the information we
-	 * need, and retry.  If nobody else has initialized the scan in the
-	 * meantime, we'll fill in the value we fetched on the second time
-	 * through.
+	 * When the caller specified a limit on the number of blocks to scan, set
+	 * If the caller specified a limit on the number of blocks to scan, set
+	 * that in the ParallelBlockTableScanDesc, unless another worker has
+	 * already done so.
+	if (numblocks != InvalidBlockNumber &&
+		pbscan->phs_numblock == InvalidBlockNumber)
+	{
+		pbscan->phs_numblock = numblocks;
+	}
+
+	/*
+	 * If the scan's phs_startblock has not yet been initialized, we must do
+	 * so now.  If a startblock was specified, start there; otherwise, if this
+	 * is not a synchronized scan, we just start at block 0, but if it is a
+	 * synchronized scan, we must get the starting position from the
+	 * synchronized scan machinery.  We can't hold the spinlock while doing
+	 * that, though, so release the spinlock, get the information we need, and
+	 * retry.  If nobody else has initialized the scan in the meantime, we'll
+	 * fill in the value we fetched on the second time through.
 	 */
 	if (pbscan->phs_startblock == InvalidBlockNumber)
 	{
-		if (!pbscan->base.phs_syncscan)
+		if (startblock != InvalidBlockNumber)
+			pbscan->phs_startblock = startblock;
+		else if (!pbscan->base.phs_syncscan)
 			pbscan->phs_startblock = 0;
 		else if (sync_startpage != InvalidBlockNumber)
 			pbscan->phs_startblock = sync_startpage;
@@ -515,6 +512,34 @@ retry:
 		}
 	}
 	SpinLockRelease(&pbscan->phs_mutex);
+
+	/*
+	 * Figure out how many blocks we're going to scan; either all of them, or
+	 * just phs_numblock's worth, if a limit has been imposed.
+	 */
+	if (pbscan->phs_numblock == InvalidBlockNumber)
+		scan_nblocks = pbscan->phs_nblocks;
+	else
+		scan_nblocks = pbscan->phs_numblock;
+
+	/*
+	 * We determine the chunk size based on scan_nblocks.  First we split
+	 * scan_nblocks into PARALLEL_SEQSCAN_NCHUNKS chunks, then we round the
+	 * result up to the next highest power of 2.  This means we split the
+	 * blocks we're scanning into somewhere between PARALLEL_SEQSCAN_NCHUNKS
+	 * and PARALLEL_SEQSCAN_NCHUNKS / 2 chunks.
+	 */
+	pbscanwork->phsw_chunk_size = pg_nextpower2_32(Max(scan_nblocks /
+													   PARALLEL_SEQSCAN_NCHUNKS, 1));
+
+	/*
+	 * Ensure we don't go over the maximum chunk size with larger tables. This
+	 * means we may get much more than PARALLEL_SEQSCAN_NCHUNKS for larger
+	 * tables.  Too large a chunk size has been shown to be detrimental to
+	 * synchronous scan performance.
+	 */
+	pbscanwork->phsw_chunk_size = Min(pbscanwork->phsw_chunk_size,
+									  PARALLEL_SEQSCAN_MAX_CHUNK_SIZE);
 }
 
 /*
@@ -530,6 +555,7 @@ table_block_parallelscan_nextpage(Relation rel,
 								  ParallelBlockTableScanWorker pbscanwork,
 								  ParallelBlockTableScanDesc pbscan)
 {
+	BlockNumber scan_nblocks;
 	BlockNumber page;
 	uint64		nallocated;
 
@@ -550,7 +576,7 @@ table_block_parallelscan_nextpage(Relation rel,
 	 *
 	 * Here we name these ranges of blocks "chunks".  The initial size of
 	 * these chunks is determined in table_block_parallelscan_startblock_init
-	 * based on the size of the relation.  Towards the end of the scan, we
+	 * based on the number of blocks to scan.  Towards the end of the scan, we
 	 * start making reductions in the size of the chunks in order to attempt
 	 * to divide the remaining work over all the workers as evenly as
 	 * possible.
@@ -567,17 +593,23 @@ table_block_parallelscan_nextpage(Relation rel,
 	 * phs_nallocated counter will exceed rs_nblocks, because workers will
 	 * still increment the value, when they try to allocate the next block but
 	 * all blocks have been allocated already. The counter must be 64 bits
-	 * wide because of that, to avoid wrapping around when rs_nblocks is close
-	 * to 2^32.
+	 * wide because of that, to avoid wrapping around when scan_nblocks is
+	 * close to 2^32.
 	 *
 	 * The actual block to return is calculated by adding the counter to the
-	 * starting block number, modulo nblocks.
+	 * starting block number, modulo phs_nblocks.
 	 */
 
+	/* First, figure out how many blocks we're planning on scanning */
+	if (pbscan->phs_numblock == InvalidBlockNumber)
+		scan_nblocks = pbscan->phs_nblocks;
+	else
+		scan_nblocks = pbscan->phs_numblock;
+
 	/*
-	 * First check if we have any remaining blocks in a previous chunk for
-	 * this worker.  We must consume all of the blocks from that before we
-	 * allocate a new chunk to the worker.
+	 * Now check if we have any remaining blocks in a previous chunk for this
+	 * worker.  We must consume all of the blocks from that before we allocate
+	 * a new chunk to the worker.
 	 */
 	if (pbscanwork->phsw_chunk_remaining > 0)
 	{
@@ -599,7 +631,7 @@ table_block_parallelscan_nextpage(Relation rel,
 		 * chunk size set to 1.
 		 */
 		if (pbscanwork->phsw_chunk_size > 1 &&
-			pbscanwork->phsw_nallocated > pbscan->phs_nblocks -
+			pbscanwork->phsw_nallocated > scan_nblocks -
 			(pbscanwork->phsw_chunk_size * PARALLEL_SEQSCAN_RAMPDOWN_CHUNKS))
 			pbscanwork->phsw_chunk_size >>= 1;
 
@@ -614,15 +646,9 @@ table_block_parallelscan_nextpage(Relation rel,
 		pbscanwork->phsw_chunk_remaining = pbscanwork->phsw_chunk_size - 1;
 	}
 
-	/*
-	 * Check if we've allocated every block in the relation, or if we've
-	 * reached the limit imposed by pbscan->phs_numblock (if set).
-	 */
-	if (nallocated >= pbscan->phs_nblocks)
-		page = InvalidBlockNumber; /* all blocks have been allocated */
-	else if (pbscan->phs_numblock != InvalidBlockNumber &&
-			 nallocated >= pbscan->phs_numblock)
-		page = InvalidBlockNumber; /* upper scan limit reached */
+	/* Check if we've run out of blocks to scan */
+	if (nallocated >= scan_nblocks)
+		page = InvalidBlockNumber;	/* all blocks have been allocated */
 	else
 		page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
 
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 7b1eb2e82c7..0125464d942 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -40,8 +40,8 @@
 #include "executor/nodeSeqscan.h"
 #include "executor/nodeSort.h"
 #include "executor/nodeSubplan.h"
-#include "executor/tqueue.h"
 #include "executor/nodeTidrangescan.h"
+#include "executor/tqueue.h"
 #include "jit/jit.h"
 #include "nodes/nodeFuncs.h"
 #include "pgstat.h"
@@ -502,7 +502,7 @@ ExecParallelInitializeDSM(PlanState *planstate,
 		case T_TidRangeScanState:
 			if (planstate->plan->parallel_aware)
 				ExecTidRangeScanInitializeDSM((TidRangeScanState *) planstate,
-										 	  d->pcxt);
+											  d->pcxt);
 			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
@@ -1008,7 +1008,7 @@ ExecParallelReInitializeDSM(PlanState *planstate,
 		case T_TidRangeScanState:
 			if (planstate->plan->parallel_aware)
 				ExecTidRangeScanReInitializeDSM((TidRangeScanState *) planstate,
-										   	    pcxt);
+												pcxt);
 			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c
index 39088755e90..03ce8525f89 100644
--- a/src/backend/executor/nodeTidrangescan.c
+++ b/src/backend/executor/nodeTidrangescan.c
@@ -250,13 +250,9 @@ TidRangeNext(TidRangeScanState *node)
 		}
 		else
 		{
-			/* rescan with the updated TID range only in non-parallel mode */
-			if (scandesc->rs_parallel == NULL)
-			{
-				/* rescan with the updated TID range */
-				table_rescan_tidrange(scandesc, &node->trss_mintid,
-									  &node->trss_maxtid);
-			}
+			/* rescan with the updated TID range */
+			table_rescan_tidrange(scandesc, &node->trss_mintid,
+								  &node->trss_maxtid);
 		}
 
 		node->trss_inScan = true;
@@ -419,6 +415,7 @@ ExecInitTidRangeScan(TidRangeScan *node, EState *estate, int eflags)
 	 */
 	return tidrangestate;
 }
+
 /* ----------------------------------------------------------------
  *						Parallel Scan Support
  * ----------------------------------------------------------------
@@ -460,19 +457,9 @@ ExecTidRangeScanInitializeDSM(TidRangeScanState *node, ParallelContext *pcxt)
 								  pscan,
 								  estate->es_snapshot);
 	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
-
-	/*
-	 * Initialize parallel scan descriptor with given TID range if it can be
-	 * evaluated successfully.
-	 */
-	if (TidRangeEval(node))
-		node->ss.ss_currentScanDesc =
-			table_beginscan_parallel_tidrange(node->ss.ss_currentRelation, pscan,
-					&node->trss_mintid, &node->trss_maxtid);
-	else
-		node->ss.ss_currentScanDesc =
-			table_beginscan_parallel_tidrange(node->ss.ss_currentRelation, pscan,
-					NULL, NULL);
+	node->ss.ss_currentScanDesc =
+		table_beginscan_parallel_tidrange(node->ss.ss_currentRelation,
+										  pscan);
 }
 
 /* ----------------------------------------------------------------
@@ -483,21 +470,12 @@ ExecTidRangeScanInitializeDSM(TidRangeScanState *node, ParallelContext *pcxt)
  */
 void
 ExecTidRangeScanReInitializeDSM(TidRangeScanState *node,
-						   ParallelContext *pcxt)
+								ParallelContext *pcxt)
 {
 	ParallelTableScanDesc pscan;
 
 	pscan = node->ss.ss_currentScanDesc->rs_parallel;
 	table_parallelscan_reinitialize(node->ss.ss_currentRelation, pscan);
-
-	/* Set the new TID range if it can be evaluated successfully */
-	if (TidRangeEval(node))
-		node->ss.ss_currentRelation->rd_tableam->scan_set_tidrange(
-				node->ss.ss_currentScanDesc, &node->trss_mintid,
-				&node->trss_maxtid);
-	else
-		node->ss.ss_currentRelation->rd_tableam->scan_set_tidrange(
-					node->ss.ss_currentScanDesc, NULL, NULL);
 }
 
 /* ----------------------------------------------------------------
@@ -508,18 +486,12 @@ ExecTidRangeScanReInitializeDSM(TidRangeScanState *node,
  */
 void
 ExecTidRangeScanInitializeWorker(TidRangeScanState *node,
-							ParallelWorkerContext *pwcxt)
+								 ParallelWorkerContext *pwcxt)
 {
 	ParallelTableScanDesc pscan;
 
 	pscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
-
-	if (TidRangeEval(node))
-		node->ss.ss_currentScanDesc =
-			table_beginscan_parallel_tidrange(node->ss.ss_currentRelation, pscan,
-					&node->trss_mintid, &node->trss_maxtid);
-	else
-		node->ss.ss_currentScanDesc =
-			table_beginscan_parallel_tidrange(node->ss.ss_currentRelation, pscan,
-					NULL, NULL);
+	node->ss.ss_currentScanDesc =
+		table_beginscan_parallel_tidrange(node->ss.ss_currentRelation,
+										  pscan);
 }
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 01976226d19..5a7283bd2f5 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1371,7 +1371,11 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 
 	/*
 	 * The first page in a range requires a random seek, but each subsequent
-	 * page is just a normal sequential page read.
+	 * page is just a normal sequential page read.  NOTE: it's desirable for
+	 * TID Range Scans to cost more than the equivalent Sequential Scans,
+	 * because Seq Scans have some performance advantages such as scan
+	 * synchronization, and we'd prefer a Seq Scan to be picked unless a TID
+	 * Range Scan really is better.
 	 */
 	ntuples = selectivity * baserel->tuples;
 	nseqpages = pages - 1.0;
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index e48c85833e7..3ddbc10bbdf 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -47,7 +47,6 @@
 #include "optimizer/pathnode.h"
 #include "optimizer/paths.h"
 #include "optimizer/restrictinfo.h"
-#include "optimizer/cost.h"
 
 
 /*
@@ -491,9 +490,8 @@ ec_member_matches_ctid(PlannerInfo *root, RelOptInfo *rel,
 
 /*
  * create_tidscan_paths
- *	  Create paths corresponding to direct TID scans of the given rel.
- *
- *	  Candidate paths are added to the rel's pathlist (using add_path).
+ *	  Create paths corresponding to direct TID scans of the given rel and add
+ *	  them to the rel's pathlist via add_path or add_partial_path.
  */
 bool
 create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 8e97fc5f0be..5ef8de3f141 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -1138,9 +1138,7 @@ extern TableScanDesc table_beginscan_parallel(Relation relation,
  * Caller must hold a suitable lock on the relation.
  */
 extern TableScanDesc table_beginscan_parallel_tidrange(Relation relation,
-													   ParallelTableScanDesc pscan,
-													   ItemPointerData * mintid,
-													   ItemPointerData * maxtid);
+													   ParallelTableScanDesc pscan);
 
 /*
  * Restart a parallel scan.  Call this in the leader process.  Caller is
@@ -2040,7 +2038,9 @@ extern BlockNumber table_block_parallelscan_nextpage(Relation rel,
 													 ParallelBlockTableScanDesc pbscan);
 extern void table_block_parallelscan_startblock_init(Relation rel,
 													 ParallelBlockTableScanWorker pbscanwork,
-													 ParallelBlockTableScanDesc pbscan);
+													 ParallelBlockTableScanDesc pbscan,
+													 BlockNumber startblock,
+													 BlockNumber numblocks);
 
 
 /* ----------------------------------------------------------------------------
diff --git a/src/test/regress/expected/tidrangescan.out b/src/test/regress/expected/tidrangescan.out
index 3c5fc9e102a..ce75c96e7c8 100644
--- a/src/test/regress/expected/tidrangescan.out
+++ b/src/test/regress/expected/tidrangescan.out
@@ -297,22 +297,23 @@ FETCH LAST c;
 
 COMMIT;
 DROP TABLE tidrangescan;
--- tests for parallel tidrangescans
-SET parallel_setup_cost TO 0;
-SET parallel_tuple_cost TO 0;
-SET min_parallel_table_scan_size TO 0;
-SET max_parallel_workers_per_gather TO 4;
+-- Tests for parallel tidrangescans
+BEGIN;
+SET LOCAL parallel_setup_cost TO 0;
+SET LOCAL parallel_tuple_cost TO 0;
+SET LOCAL min_parallel_table_scan_size TO 0;
+SET LOCAL max_parallel_workers_per_gather TO 4;
 CREATE TABLE parallel_tidrangescan(id integer, data text) WITH (fillfactor = 10);
--- insert enough tuples such that each page gets 5 tuples with fillfactor = 10
+-- Insert enough tuples such that each page gets 5 tuples with fillfactor = 10
 INSERT INTO parallel_tidrangescan SELECT i, repeat('x', 100) FROM generate_series(1,200) AS s(i);
--- ensure there are 40 pages for parallel test
+-- Ensure there are 40 pages for parallel test
 SELECT min(ctid), max(ctid) FROM parallel_tidrangescan;
   min  |  max   
 -------+--------
  (0,1) | (39,5)
 (1 row)
 
--- parallel range scans with upper bound
+-- Parallel range scans with upper bound
 EXPLAIN (COSTS OFF)
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)';
                              QUERY PLAN                             
@@ -331,7 +332,7 @@ SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)';
    150
 (1 row)
 
--- parallel range scans with lower bound
+-- Parallel range scans with lower bound
 EXPLAIN (COSTS OFF)
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)';
                              QUERY PLAN                             
@@ -350,7 +351,7 @@ SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)';
    150
 (1 row)
 
--- parallel range scans with both bounds
+-- Parallel range scans with both bounds
 EXPLAIN (COSTS OFF)
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)';
                                     QUERY PLAN                                     
@@ -369,7 +370,7 @@ SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30
    100
 (1 row)
 
--- parallel rescans
+-- Parallel rescans
 EXPLAIN (COSTS OFF)
 SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
 LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
@@ -398,9 +399,5 @@ WHERE t.ctid < '(1,0)';
  (0,5) | 5
 (5 rows)
 
-DROP TABLE parallel_tidrangescan;
-RESET parallel_setup_cost;
-RESET parallel_tuple_cost;
-RESET min_parallel_table_scan_size;
-RESET max_parallel_workers_per_gather;
+ROLLBACK;
 RESET enable_seqscan;
diff --git a/src/test/regress/sql/tidrangescan.sql b/src/test/regress/sql/tidrangescan.sql
index 0f1e43c6d05..c9a63b10ddd 100644
--- a/src/test/regress/sql/tidrangescan.sql
+++ b/src/test/regress/sql/tidrangescan.sql
@@ -98,36 +98,38 @@ COMMIT;
 
 DROP TABLE tidrangescan;
 
--- tests for parallel tidrangescans
-SET parallel_setup_cost TO 0;
-SET parallel_tuple_cost TO 0;
-SET min_parallel_table_scan_size TO 0;
-SET max_parallel_workers_per_gather TO 4;
+-- Tests for parallel tidrangescans
+BEGIN;
+
+SET LOCAL parallel_setup_cost TO 0;
+SET LOCAL parallel_tuple_cost TO 0;
+SET LOCAL min_parallel_table_scan_size TO 0;
+SET LOCAL max_parallel_workers_per_gather TO 4;
 
 CREATE TABLE parallel_tidrangescan(id integer, data text) WITH (fillfactor = 10);
 
--- insert enough tuples such that each page gets 5 tuples with fillfactor = 10
+-- Insert enough tuples such that each page gets 5 tuples with fillfactor = 10
 INSERT INTO parallel_tidrangescan SELECT i, repeat('x', 100) FROM generate_series(1,200) AS s(i);
 
--- ensure there are 40 pages for parallel test
+-- Ensure there are 40 pages for parallel test
 SELECT min(ctid), max(ctid) FROM parallel_tidrangescan;
 
--- parallel range scans with upper bound
+-- Parallel range scans with upper bound
 EXPLAIN (COSTS OFF)
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)';
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)';
 
--- parallel range scans with lower bound
+-- Parallel range scans with lower bound
 EXPLAIN (COSTS OFF)
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)';
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)';
 
--- parallel range scans with both bounds
+-- Parallel range scans with both bounds
 EXPLAIN (COSTS OFF)
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)';
 SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)';
 
--- parallel rescans
+-- Parallel rescans
 EXPLAIN (COSTS OFF)
 SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
 LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
@@ -137,10 +139,5 @@ SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
 LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
 WHERE t.ctid < '(1,0)';
 
-DROP TABLE parallel_tidrangescan;
-
-RESET parallel_setup_cost;
-RESET parallel_tuple_cost;
-RESET min_parallel_table_scan_size;
-RESET max_parallel_workers_per_gather;
+ROLLBACK;
 RESET enable_seqscan;
-- 
2.43.0

