diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index 8867da6c693..e0808afc350 100644
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -376,22 +376,20 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
       <literal>n_distinct_inherited</literal>, which override the
       number-of-distinct-values estimates made by subsequent
       <link linkend="sql-analyze"><command>ANALYZE</command></link>
-      operations.  <literal>n_distinct</literal> affects the statistics for the table
-      itself, while <literal>n_distinct_inherited</literal> affects the statistics
-      gathered for the table plus its inheritance children.  When set to a
-      positive value, <command>ANALYZE</command> will assume that the column contains
-      exactly the specified number of distinct nonnull values.  When set to a
-      negative value, which must be greater
-      than or equal to -1, <command>ANALYZE</command> will assume that the number of
-      distinct nonnull values in the column is linear in the size of the
-      table; the exact count is to be computed by multiplying the estimated
-      table size by the absolute value of the given number.  For example,
+      operations.  Ordinarily <literal>n_distinct</literal> is used.
+      <literal>n_distinct_inherited</literal> exists to allow the distinct
+      estimate to be overridden for the statistics gathered for inheritance
+      parent tables and for partitioned tables.  When set to a positive
+      value, the query planner will assume that the column contains exactly
+      the specified number of distinct nonnull values.  Alternatively, a
+      negative value greater than or equal to -1 may be specified.  This
+      instructs the query planner to estimate the number of distinct values
+      by multiplying the absolute value of the specified number by the
+      estimated number of rows in the table.  For example,
       a value of -1 implies that all values in the column are distinct, while
-      a value of -0.5 implies that each value appears twice on the average.
-      This can be useful when the size of the table changes over time, since
-      the multiplication by the number of rows in the table is not performed
-      until query planning time.  Specify a value of 0 to revert to estimating
-      the number of distinct values normally.  For more information on the use
+      a value of -0.5 implies that each value appears twice on average.
+      This can be useful when the size of the table changes over time.
+      Specify a value of 0 to revert to estimating the number of distinct
+      values normally.
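+      <!-- Hypothetical usage example (table and column names are purely
+           illustrative):
+             ALTER TABLE measurements ALTER COLUMN station SET (n_distinct = -0.05);
+           If the table is estimated to contain one million rows, the planner
+           will assume roughly 50000 distinct station values. -->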
+      For more information on the use
       of statistics by the <productname>PostgreSQL</productname> query
       planner, refer to <xref linkend="planner-stats"/>.
      </para>
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 568696333c2..c9f7d4aa1f7 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -490,6 +490,16 @@ heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlk
 
 	scan->rs_startblock = startBlk;
 	scan->rs_numblocks = numBlks;
+
+	/*
+	 * When we're the leader of a parallel scan, also set the limits in the
+	 * shared ParallelBlockTableScanDesc.  Workers don't update the shared
+	 * copy; they rely on the values installed by the leader.
+	 */
+	if (scan->rs_base.rs_parallel != NULL && !IsParallelWorker())
+	{
+		ParallelBlockTableScanDesc bpscan;
+
+		bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
+		bpscan->phs_startblock = startBlk;
+		bpscan->phs_numblock = numBlks;
+	}
 }
 
 /*
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index 5e41404937e..5ad6963dbbe 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -188,6 +188,42 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
 											pscan, flags);
 }
 
+TableScanDesc
+table_beginscan_parallel_tidrange(Relation relation,
+								  ParallelTableScanDesc pscan,
+								  ItemPointer mintid, ItemPointer maxtid)
+{
+	Snapshot	snapshot;
+	uint32		flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE;
+	TableScanDesc sscan;
+
+	Assert(RelFileLocatorEquals(relation->rd_locator, pscan->phs_locator));
+
+	/*
+	 * Disable syncscan; a TID range scan must start at the caller-specified
+	 * block rather than at a block chosen to synchronize with other scans.
+	 */
+	pscan->phs_syncscan = false;
+
+	if (!pscan->phs_snapshot_any)
+	{
+		/* Snapshot was serialized -- restore it */
+		snapshot = RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
+		RegisterSnapshot(snapshot);
+		flags |= SO_TEMP_SNAPSHOT;
+	}
+	else
+	{
+		/* SnapshotAny passed by caller (not serialized) */
+		snapshot = SnapshotAny;
+	}
+
+	sscan = relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
+											 pscan, flags);
+
+	/* Set the scan limits based on the given min and max tids */
+	relation->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid);
+
+	return sscan;
+}
+
 
 /* ----------------------------------------------------------------------------
  * Index scan related functions.
@@ -398,6 +434,7 @@ table_block_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan)
 		bpscan->phs_nblocks > NBuffers / 4;
 	SpinLockInit(&bpscan->phs_mutex);
 	bpscan->phs_startblock = InvalidBlockNumber;
+	bpscan->phs_numblock = InvalidBlockNumber;
 	pg_atomic_init_u64(&bpscan->phs_nallocated, 0);
 
 	return sizeof(ParallelBlockTableScanDescData);
@@ -577,8 +614,15 @@ table_block_parallelscan_nextpage(Relation rel,
 		pbscanwork->phsw_chunk_remaining = pbscanwork->phsw_chunk_size - 1;
 	}
 
+	/*
+	 * Check if we've allocated every block in the relation, or if we've
+	 * reached the limit imposed by pbscan->phs_numblock (if set).
+	 */
 	if (nallocated >= pbscan->phs_nblocks)
 		page = InvalidBlockNumber;	/* all blocks have been allocated */
+	else if (pbscan->phs_numblock != InvalidBlockNumber &&
+			 nallocated >= pbscan->phs_numblock)
+		page = InvalidBlockNumber;	/* upper scan limit reached */
 	else
 		page = (nallocated + pbscan->phs_startblock) % pbscan->phs_nblocks;
 
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index f098a5557cf..c8c41cac1ea 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -40,6 +40,7 @@
 #include "executor/nodeSeqscan.h"
 #include "executor/nodeSort.h"
 #include "executor/nodeSubplan.h"
+#include "executor/nodeTidrangescan.h"
 #include "executor/tqueue.h"
 #include "jit/jit.h"
 #include "nodes/nodeFuncs.h"
@@ -266,6 +267,11 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
 				ExecForeignScanEstimate((ForeignScanState *) planstate,
 										e->pcxt);
 			break;
+		case T_TidRangeScanState:
+			if (planstate->plan->parallel_aware)
+				ExecTidRangeScanEstimate((TidRangeScanState *) planstate,
+										 e->pcxt);
+			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
 				ExecAppendEstimate((AppendState *) planstate,
@@ -493,6 +499,11 @@ ExecParallelInitializeDSM(PlanState *planstate,
 				ExecForeignScanInitializeDSM((ForeignScanState *) planstate,
 											 d->pcxt);
 			break;
+		case T_TidRangeScanState:
+			if (planstate->plan->parallel_aware)
+				ExecTidRangeScanInitializeDSM((TidRangeScanState *) planstate,
+											  d->pcxt);
+			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
 				ExecAppendInitializeDSM((AppendState *) planstate,
@@ -994,6 +1005,11 @@ ExecParallelReInitializeDSM(PlanState *planstate,
 				ExecForeignScanReInitializeDSM((ForeignScanState *) planstate,
 											   pcxt);
 			break;
+		case T_TidRangeScanState:
+			if (planstate->plan->parallel_aware)
+				ExecTidRangeScanReInitializeDSM((TidRangeScanState *) planstate,
+												pcxt);
+			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
 				ExecAppendReInitializeDSM((AppendState *) planstate, pcxt);
@@ -1362,6 +1378,11 @@ ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
 				ExecForeignScanInitializeWorker((ForeignScanState *) planstate,
 												pwcxt);
 			break;
+		case T_TidRangeScanState:
+			if (planstate->plan->parallel_aware)
+				ExecTidRangeScanInitializeWorker((TidRangeScanState *) planstate,
+												 pwcxt);
+			break;
 		case T_AppendState:
 			if (planstate->plan->parallel_aware)
 				ExecAppendInitializeWorker((AppendState *) planstate, pwcxt);
diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c
index 1bce8d6cbfe..065bd37d9dd 100644
--- a/src/backend/executor/nodeTidrangescan.c
+++ b/src/backend/executor/nodeTidrangescan.c
@@ -128,14 +128,17 @@ TidExprListCreate(TidRangeScanState *tidrangestate)
  *		TidRangeEval
  *
  *		Compute and set node's block and offset range to scan by evaluating
- *		node->trss_tidexprs.  Returns false if we detect the range cannot
- *		contain any tuples.  Returns true if it's possible for the range to
- *		contain tuples.  We don't bother validating that trss_mintid is less
- *		than or equal to trss_maxtid, as the scan_set_tidrange() table AM
- *		function will handle that.
+ *		node->trss_tidexprs.  Sets node's trss_rangeIsEmpty to true if the
+ *		calculated range cannot contain any tuples, otherwise sets it to
+ *		false and sets trss_mintid and trss_maxtid to the calculated range.
+ *
+ *		We don't bother validating that trss_mintid is less than or equal to
+ *		trss_maxtid, as the scan_set_tidrange() table AM function will handle
+ *		that.
  * ----------------------------------------------------------------
  */
-static bool
+static void
 TidRangeEval(TidRangeScanState *node)
 {
 	ExprContext *econtext = node->ss.ps.ps_ExprContext;
@@ -165,7 +168,10 @@ TidRangeEval(TidRangeScanState *node)
 
 		/* If the bound is NULL, *nothing* matches the qual. */
 		if (isNull)
-			return false;
+		{
+			node->trss_rangeIsEmpty = true;
+			return;
+		}
 
 		if (tidopexpr->exprtype == TIDEXPR_LOWER_BOUND)
 		{
@@ -207,7 +213,7 @@ TidRangeEval(TidRangeScanState *node)
 	ItemPointerCopy(&lowerBound, &node->trss_mintid);
 	ItemPointerCopy(&upperBound, &node->trss_maxtid);
 
-	return true;
+	node->trss_rangeIsEmpty = false;
 }
 
 /* ----------------------------------------------------------------
@@ -234,12 +240,19 @@ TidRangeNext(TidRangeScanState *node)
 	slot = node->ss.ss_ScanTupleSlot;
 	direction = estate->es_direction;
 
-	if (!node->trss_inScan)
+	/* First time through, compute TID range to scan */
+	if (!node->trss_rangeCalcDone)
 	{
-		/* First time through, compute TID range to scan */
-		if (!TidRangeEval(node))
-			return NULL;
+		TidRangeEval(node);
+		node->trss_rangeCalcDone = true;
+	}
 
+	/* Check if the range was detected not to contain any tuples */
+	if (node->trss_rangeIsEmpty)
+		return NULL;
+
+	if (!node->trss_inScan)
+	{
 		if (scandesc == NULL)
 		{
 			scandesc = table_beginscan_tidrange(node->ss.ss_currentRelation,
@@ -250,9 +263,13 @@ TidRangeNext(TidRangeScanState *node)
 		}
 		else
 		{
-			/* rescan with the updated TID range */
-			table_rescan_tidrange(scandesc, &node->trss_mintid,
-								  &node->trss_maxtid);
+			/*
+			 * Rescan with the updated TID range.  In parallel mode the
+			 * range has already been set by the parallel scan
+			 * initialization (or re-initialization) code, so this is only
+			 * needed in the non-parallel case.
+			 */
+			if (scandesc->rs_parallel == NULL)
+				table_rescan_tidrange(scandesc, &node->trss_mintid,
+									  &node->trss_maxtid);
 		}
 
 		node->trss_inScan = true;
@@ -274,13 +291,18 @@ TidRangeNext(TidRangeScanState *node)
 static bool
 TidRangeRecheck(TidRangeScanState *node, TupleTableSlot *slot)
 {
-	if (!TidRangeEval(node))
-		return false;
+	/* First time through, compute the TID range */
+	if (!node->trss_rangeCalcDone)
+	{
+		TidRangeEval(node);
+		node->trss_rangeCalcDone = true;
+	}
 
 	Assert(ItemPointerIsValid(&slot->tts_tid));
 
 	/* Recheck the ctid is still within range */
-	if (ItemPointerCompare(&slot->tts_tid, &node->trss_mintid) < 0 ||
+	if (node->trss_rangeIsEmpty ||
+		ItemPointerCompare(&slot->tts_tid, &node->trss_mintid) < 0 ||
 		ItemPointerCompare(&slot->tts_tid, &node->trss_maxtid) > 0)
 		return false;
 
@@ -322,6 +344,9 @@ ExecReScanTidRangeScan(TidRangeScanState *node)
 	/* mark scan as not in progress, and tid range list as not computed yet */
 	node->trss_inScan = false;
 
+	/* mark that the TID range needs to be recalculated */
+	node->trss_rangeCalcDone = false;
+
 	/*
 	 * We must wait until TidRangeNext before calling table_rescan_tidrange.
 	 */
@@ -380,6 +405,10 @@ ExecInitTidRangeScan(TidRangeScan *node, EState *estate, int eflags)
 	 * mark scan as not in progress, and TID range as not computed yet
 	 */
 	tidrangestate->trss_inScan = false;
+	tidrangestate->trss_rangeCalcDone = false;
+
+	/* This will be calculated correctly in TidRangeEval() */
+	tidrangestate->trss_rangeIsEmpty = true;
 
 	/*
 	 * open the scan relation
@@ -415,3 +444,109 @@ ExecInitTidRangeScan(TidRangeScan *node, EState *estate, int eflags)
 	 */
 	return tidrangestate;
 }
+
+/* ----------------------------------------------------------------
+ *						Parallel Scan Support
+ * ----------------------------------------------------------------
+ */
+
+/* ----------------------------------------------------------------
+ *		ExecTidRangeScanEstimate
+ *
+ *		Compute the amount of space we'll need in the parallel
+ *		query DSM, and inform pcxt->estimator about our needs.
+ * ----------------------------------------------------------------
+ */
+void
+ExecTidRangeScanEstimate(TidRangeScanState *node, ParallelContext *pcxt)
+{
+	EState	   *estate = node->ss.ps.state;
+
+	node->trss_pscanlen =
+		table_parallelscan_estimate(node->ss.ss_currentRelation,
+									estate->es_snapshot);
+	shm_toc_estimate_chunk(&pcxt->estimator, node->trss_pscanlen);
+	shm_toc_estimate_keys(&pcxt->estimator, 1);
+}
+
+/* ----------------------------------------------------------------
+ *		ExecTidRangeScanInitializeDSM
+ *
+ *		Set up a parallel TID range scan descriptor.
+ * ----------------------------------------------------------------
+ */
+void
+ExecTidRangeScanInitializeDSM(TidRangeScanState *node, ParallelContext *pcxt)
+{
+	EState	   *estate = node->ss.ps.state;
+	ParallelTableScanDesc pscan;
+
+	pscan = shm_toc_allocate(pcxt->toc, node->trss_pscanlen);
+	table_parallelscan_initialize(node->ss.ss_currentRelation,
+								  pscan,
+								  estate->es_snapshot);
+	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
+
+	/* Determine the TID range */
+	TidRangeEval(node);
+	node->trss_rangeCalcDone = true;
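+
+	/*
+	 * Note: the TID range itself is not passed through shared memory; each
+	 * worker recomputes it from the same expressions in
+	 * ExecTidRangeScanInitializeWorker.
+	 */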
+
+	/* Set up the scandesc, unless TidRangeEval found the range is empty */
+	if (!node->trss_rangeIsEmpty)
+		node->ss.ss_currentScanDesc =
+				table_beginscan_parallel_tidrange(node->ss.ss_currentRelation,
+												  pscan,
+												  &node->trss_mintid,
+												  &node->trss_maxtid);
+	else
+		node->ss.ss_currentScanDesc = NULL;
+}
+
+/* ----------------------------------------------------------------
+ *		ExecTidRangeScanReInitializeDSM
+ *
+ *		Reset shared state before beginning a fresh scan.
+ * ----------------------------------------------------------------
+ */
+void
+ExecTidRangeScanReInitializeDSM(TidRangeScanState *node,
+								ParallelContext *pcxt)
+{
+	ParallelTableScanDesc pscan;
+
+	pscan = node->ss.ss_currentScanDesc->rs_parallel;
+	table_parallelscan_reinitialize(node->ss.ss_currentRelation, pscan);
+
+	/* Determine the TID range */
+	TidRangeEval(node);
+	node->trss_rangeCalcDone = true;
+
+	if (!node->trss_rangeIsEmpty)
+		node->ss.ss_currentRelation->rd_tableam->scan_set_tidrange(node->ss.ss_currentScanDesc,
+																   &node->trss_mintid,
+																   &node->trss_maxtid);
+}
+
+/* ----------------------------------------------------------------
+ *		ExecTidRangeScanInitializeWorker
+ *
+ *		Copy relevant information from TOC into planstate.
+ * ----------------------------------------------------------------
+ */
+void
+ExecTidRangeScanInitializeWorker(TidRangeScanState *node,
+								 ParallelWorkerContext *pwcxt)
+{
+	ParallelTableScanDesc pscan;
+
+	pscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
+
+	/* Determine the TID range */
+	TidRangeEval(node);
+	node->trss_rangeCalcDone = true;
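+
+	/*
+	 * If the range is empty, leave ss_currentScanDesc unset; TidRangeNext
+	 * returns no tuples in that case based on trss_rangeIsEmpty alone.
+	 */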
+
+	if (!node->trss_rangeIsEmpty)
+		node->ss.ss_currentScanDesc =
+				table_beginscan_parallel_tidrange(node->ss.ss_currentRelation,
+												  pscan,
+												  &node->trss_mintid,
+												  &node->trss_maxtid);
+}
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 94077e6a006..03ac556c73d 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1366,8 +1366,9 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 {
 	Selectivity selectivity;
 	double		pages;
-	Cost		startup_cost = 0;
-	Cost		run_cost = 0;
+	Cost		startup_cost;
+	Cost		cpu_run_cost;
+	Cost		disk_run_cost;
 	QualCost	qpqual_cost;
 	Cost		cpu_per_tuple;
 	QualCost	tid_qual_cost;
@@ -1396,11 +1397,7 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 
 	/*
 	 * The first page in a range requires a random seek, but each subsequent
-	 * page is just a normal sequential page read. NOTE: it's desirable for
-	 * TID Range Scans to cost more than the equivalent Sequential Scans,
-	 * because Seq Scans have some performance advantages such as scan
-	 * synchronization and parallelizability, and we'd prefer one of them to
-	 * be picked unless a TID Range Scan really is better.
+	 * page is just a normal sequential page read.
 	 */
 	ntuples = selectivity * baserel->tuples;
 	nseqpages = pages - 1.0;
@@ -1417,7 +1414,7 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 							  &spc_seq_page_cost);
 
 	/* disk costs; 1 random page and the remainder as seq pages */
-	run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
+	disk_run_cost = spc_random_page_cost + spc_seq_page_cost * nseqpages;
 
 	/* Add scanning CPU costs */
 	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
@@ -1429,20 +1426,35 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 	 * can't be removed, this is a mistake and we're going to underestimate
 	 * the CPU cost a bit.)
 	 */
-	startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
+	startup_cost = qpqual_cost.startup + tid_qual_cost.per_tuple;
 	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
 		tid_qual_cost.per_tuple;
-	run_cost += cpu_per_tuple * ntuples;
+	cpu_run_cost = cpu_per_tuple * ntuples;
 
 	/* tlist eval costs are paid per output row, not per tuple scanned */
 	startup_cost += path->pathtarget->cost.startup;
-	run_cost += path->pathtarget->cost.per_tuple * path->rows;
+	cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
+
+	/* Adjust costing for parallelism, if used. */
+	if (path->parallel_workers > 0)
+	{
+		double		parallel_divisor = get_parallel_divisor(path);
+
+		/* The CPU cost is divided among all the workers. */
+		cpu_run_cost /= parallel_divisor;
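+
+		/*
+		 * As in cost_seqscan(), the disk costs are not divided among the
+		 * workers; the same total amount of I/O is expected with or
+		 * without parallelism.
+		 */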
+
+		/*
+		 * In the case of a parallel plan, the row count needs to represent
+		 * the number of tuples processed per worker.
+		 */
+		path->rows = clamp_row_est(path->rows / parallel_divisor);
+	}
 
 	/* we should not generate this path type when enable_tidscan=false */
 	Assert(enable_tidscan);
 	path->disabled_nodes = 0;
 	path->startup_cost = startup_cost;
-	path->total_cost = startup_cost + run_cost;
+	path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
 }
 
 /*
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 2bfb338b81c..e9e52e4abe7 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -492,7 +492,8 @@ ec_member_matches_ctid(PlannerInfo *root, RelOptInfo *rel,
  * create_tidscan_paths
  *	  Create paths corresponding to direct TID scans of the given rel.
  *
- *	  Candidate paths are added to the rel's pathlist (using add_path).
+ *	  Candidate paths are added to the rel's pathlist (using add_path) or,
+ *	  for partial paths, to its partial_pathlist (using add_partial_path).
  */
 bool
 create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
@@ -553,7 +554,24 @@ create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
 
 		add_path(rel, (Path *) create_tidrangescan_path(root, rel,
 														tidrangequals,
-														required_outer));
+														required_outer,
+														0));
+
+		/* If appropriate, consider parallel tid range scan. */
+		if (rel->consider_parallel && required_outer == NULL)
+		{
+			int			parallel_workers;
+
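+			/*
+			 * Estimate the number of workers from the relation's total page
+			 * count; -1 indicates there are no index pages to consider.
+			 */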
+			parallel_workers = compute_parallel_worker(rel, rel->pages, -1,
+													   max_parallel_workers_per_gather);
+
+			if (parallel_workers > 0)
+				add_partial_path(rel, (Path *) create_tidrangescan_path(root,
+																		rel,
+																		tidrangequals,
+																		required_outer,
+																		parallel_workers));
+		}
 	}
 
 	/*
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index e8ea78c0c97..4f2a7ddd23f 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -1173,12 +1173,14 @@ subquery_planner(PlannerGlobal *glob, Query *parse, char *plan_name,
 	foreach(l, (List *) parse->havingQual)
 	{
 		Node	   *havingclause = (Node *) lfirst(l);
+		Relids		having_relids;
 
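+		/*
+		 * Note: a var-free HAVING clause must also be kept in HAVING when
+		 * grouping sets are present.  If it were pushed down to WHERE it
+		 * would filter out input rows, but grouping sets can emit a result
+		 * row even when the input is empty, and the clause must still be
+		 * able to reject such a row.
+		 */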
 		if (contain_agg_clause(havingclause) ||
 			contain_volatile_functions(havingclause) ||
 			contain_subplans(havingclause) ||
 			(parse->groupClause && parse->groupingSets &&
-			 bms_is_member(root->group_rtindex, pull_varnos(root, havingclause))))
+			 ((having_relids = pull_varnos(root, havingclause)) == NULL ||
+			  bms_is_member(root->group_rtindex, having_relids))))
 		{
 			/* keep it in HAVING */
 			newHaving = lappend(newHaving, havingclause);
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index bca51b4067b..1bb613d44b0 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1261,7 +1261,8 @@ create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, List *tidquals,
  */
 TidRangePath *
 create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
-						 List *tidrangequals, Relids required_outer)
+						 List *tidrangequals, Relids required_outer,
+						 int parallel_workers)
 {
 	TidRangePath *pathnode = makeNode(TidRangePath);
 
@@ -1270,9 +1271,9 @@ create_tidrangescan_path(PlannerInfo *root, RelOptInfo *rel,
 	pathnode->path.pathtarget = rel->reltarget;
 	pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
 														  required_outer);
-	pathnode->path.parallel_aware = false;
+	pathnode->path.parallel_aware = (parallel_workers > 0);
 	pathnode->path.parallel_safe = rel->consider_parallel;
-	pathnode->path.parallel_workers = 0;
+	pathnode->path.parallel_workers = parallel_workers;
 	pathnode->path.pathkeys = NIL;	/* always unordered */
 
 	pathnode->tidrangequals = tidrangequals;
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index b5e0fb386c0..3da43557a13 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -96,6 +96,8 @@ typedef struct ParallelBlockTableScanDescData
 	BlockNumber phs_nblocks;	/* # blocks in relation at start of scan */
 	slock_t		phs_mutex;		/* mutual exclusion for setting startblock */
 	BlockNumber phs_startblock; /* starting block number */
+	BlockNumber phs_numblock;	/* # blocks to scan, or InvalidBlockNumber if
+								 * no limit */
 	pg_atomic_uint64 phs_nallocated;	/* number of blocks allocated to
 										 * workers so far. */
 }			ParallelBlockTableScanDescData;
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index e16bf025692..3c241d10a52 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -1130,6 +1130,18 @@ extern void table_parallelscan_initialize(Relation rel,
 extern TableScanDesc table_beginscan_parallel(Relation relation,
 											  ParallelTableScanDesc pscan);
 
+/*
+ * Begin a parallel tidrange scan. `pscan` needs to have been initialized with
+ * table_parallelscan_initialize(), for the same relation. The initialization
+ * does not need to have happened in this backend.
+ *
+ * Caller must hold a suitable lock on the relation.
+ */
+extern TableScanDesc table_beginscan_parallel_tidrange(Relation relation,
+													   ParallelTableScanDesc pscan,
+													   ItemPointer mintid,
+													   ItemPointer maxtid);
+
 /*
  * Restart a parallel scan.  Call this in the leader process.  Caller is
  * responsible for making sure that all workers have finished the scan
diff --git a/src/include/executor/nodeTidrangescan.h b/src/include/executor/nodeTidrangescan.h
index a831f1202ca..2b5465b3ce4 100644
--- a/src/include/executor/nodeTidrangescan.h
+++ b/src/include/executor/nodeTidrangescan.h
@@ -14,6 +14,7 @@
 #ifndef NODETIDRANGESCAN_H
 #define NODETIDRANGESCAN_H
 
+#include "access/parallel.h"
 #include "nodes/execnodes.h"
 
 extern TidRangeScanState *ExecInitTidRangeScan(TidRangeScan *node,
@@ -21,4 +22,10 @@ extern TidRangeScanState *ExecInitTidRangeScan(TidRangeScan *node,
 extern void ExecEndTidRangeScan(TidRangeScanState *node);
 extern void ExecReScanTidRangeScan(TidRangeScanState *node);
 
+/* parallel scan support */
+extern void ExecTidRangeScanEstimate(TidRangeScanState *node,
+									 ParallelContext *pcxt);
+extern void ExecTidRangeScanInitializeDSM(TidRangeScanState *node,
+										   ParallelContext *pcxt);
+extern void ExecTidRangeScanReInitializeDSM(TidRangeScanState *node,
+											ParallelContext *pcxt);
+extern void ExecTidRangeScanInitializeWorker(TidRangeScanState *node,
+											 ParallelWorkerContext *pwcxt);
+
 #endif							/* NODETIDRANGESCAN_H */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index a36653c37f9..c6e3ccff046 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1924,6 +1924,9 @@ typedef struct TidScanState
  *		trss_mintid			the lowest TID in the scan range
  *		trss_maxtid			the highest TID in the scan range
  *		trss_inScan			is a scan currently in progress?
+ *		trss_rangeCalcDone	has the TID range been calculated yet?
+ *		trss_rangeIsEmpty	true if the TID range is certainly empty
+ *		trss_pscanlen		size of parallel heap scan descriptor
  * ----------------
  */
 typedef struct TidRangeScanState
@@ -1933,6 +1936,9 @@ typedef struct TidRangeScanState
 	ItemPointerData trss_mintid;
 	ItemPointerData trss_maxtid;
 	bool		trss_inScan;
+	bool		trss_rangeCalcDone;
+	bool		trss_rangeIsEmpty;
+	Size		trss_pscanlen;
 } TidRangeScanState;
 
 /* ----------------
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index da60383c2aa..090e836cfc9 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -67,7 +67,8 @@ extern TidPath *create_tidscan_path(PlannerInfo *root, RelOptInfo *rel,
 extern TidRangePath *create_tidrangescan_path(PlannerInfo *root,
 											  RelOptInfo *rel,
 											  List *tidrangequals,
-											  Relids required_outer);
+											  Relids required_outer,
+											  int parallel_workers);
 extern AppendPath *create_append_path(PlannerInfo *root, RelOptInfo *rel,
 									  List *subpaths, List *partial_subpaths,
 									  List *pathkeys, Relids required_outer,
diff --git a/src/test/regress/expected/tidrangescan.out b/src/test/regress/expected/tidrangescan.out
index 721f3b94e04..bbb1b87fa89 100644
--- a/src/test/regress/expected/tidrangescan.out
+++ b/src/test/regress/expected/tidrangescan.out
@@ -297,4 +297,110 @@ FETCH LAST c;
 
 COMMIT;
 DROP TABLE tidrangescan;
+-- Tests for parallel tidrangescans
+SET parallel_setup_cost TO 0;
+SET parallel_tuple_cost TO 0;
+SET min_parallel_table_scan_size TO 0;
+SET max_parallel_workers_per_gather TO 4;
+CREATE TABLE parallel_tidrangescan(id integer, data text) WITH (fillfactor = 10);
+-- Insert enough tuples such that each page gets 5 tuples with fillfactor = 10
+INSERT INTO parallel_tidrangescan SELECT i, repeat('x', 100) FROM generate_series(1,200) AS s(i);
+-- Ensure there are 40 pages for the parallel test
+SELECT min(ctid), max(ctid) FROM parallel_tidrangescan;
+  min  |  max   
+-------+--------
+ (0,1) | (39,5)
+(1 row)
+
+-- Parallel range scans with upper bound
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)';
+                             QUERY PLAN                             
+--------------------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Tid Range Scan on parallel_tidrangescan
+                     TID Cond: (ctid < '(30,1)'::tid)
+(6 rows)
+
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)';
+ count 
+-------
+   150
+(1 row)
+
+-- Parallel range scans with lower bound
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)';
+                             QUERY PLAN                             
+--------------------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Tid Range Scan on parallel_tidrangescan
+                     TID Cond: (ctid > '(10,0)'::tid)
+(6 rows)
+
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)';
+ count 
+-------
+   150
+(1 row)
+
+-- Parallel range scans with both bounds
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)';
+                                    QUERY PLAN                                     
+-----------------------------------------------------------------------------------
+ Finalize Aggregate
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial Aggregate
+               ->  Parallel Tid Range Scan on parallel_tidrangescan
+                     TID Cond: ((ctid > '(10,0)'::tid) AND (ctid < '(30,1)'::tid))
+(6 rows)
+
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)';
+ count 
+-------
+   100
+(1 row)
+
+-- Parallel rescans
+EXPLAIN (COSTS OFF)
+SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
+LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
+WHERE t.ctid < '(1,0)';
+                           QUERY PLAN                           
+----------------------------------------------------------------
+ Nested Loop
+   ->  Gather
+         Workers Planned: 4
+         ->  Parallel Tid Range Scan on parallel_tidrangescan t
+               TID Cond: (ctid < '(1,0)'::tid)
+   ->  Aggregate
+         ->  Tid Range Scan on parallel_tidrangescan t2
+               TID Cond: (ctid <= t.ctid)
+(8 rows)
+
+SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
+LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
+WHERE t.ctid < '(1,0)';
+ ctid  | c 
+-------+---
+ (0,1) | 1
+ (0,2) | 2
+ (0,3) | 3
+ (0,4) | 4
+ (0,5) | 5
+(5 rows)
+
+DROP TABLE parallel_tidrangescan;
+RESET max_parallel_workers_per_gather;
+RESET min_parallel_table_scan_size;
+RESET parallel_tuple_cost;
+RESET parallel_setup_cost;
 RESET enable_seqscan;
diff --git a/src/test/regress/sql/tidrangescan.sql b/src/test/regress/sql/tidrangescan.sql
index ac09ebb6262..f86493530e2 100644
--- a/src/test/regress/sql/tidrangescan.sql
+++ b/src/test/regress/sql/tidrangescan.sql
@@ -98,4 +98,49 @@ COMMIT;
 
 DROP TABLE tidrangescan;
 
+-- Tests for parallel tidrangescans
+SET parallel_setup_cost TO 0;
+SET parallel_tuple_cost TO 0;
+SET min_parallel_table_scan_size TO 0;
+SET max_parallel_workers_per_gather TO 4;
+
+CREATE TABLE parallel_tidrangescan(id integer, data text) WITH (fillfactor = 10);
+
+-- Insert enough tuples such that each page gets 5 tuples with fillfactor = 10
+INSERT INTO parallel_tidrangescan SELECT i, repeat('x', 100) FROM generate_series(1,200) AS s(i);
+
+-- Ensure there are 40 pages for the parallel test
+SELECT min(ctid), max(ctid) FROM parallel_tidrangescan;
+
+-- Parallel range scans with upper bound
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)';
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)';
+
+-- Parallel range scans with lower bound
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)';
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)';
+
+-- Parallel range scans with both bounds
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)';
+SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)';
+
+-- Parallel rescans
+EXPLAIN (COSTS OFF)
+SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
+LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
+WHERE t.ctid < '(1,0)';
+
+SELECT t.ctid,t2.c FROM parallel_tidrangescan t,
+LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2
+WHERE t.ctid < '(1,0)';
+
+DROP TABLE parallel_tidrangescan;
+
+RESET max_parallel_workers_per_gather;
+RESET min_parallel_table_scan_size;
+RESET parallel_tuple_cost;
+RESET parallel_setup_cost;
 RESET enable_seqscan;
