diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index ae15c0b..dbf4efb 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -80,7 +80,8 @@ static HeapScanDesc heap_beginscan_internal(Relation relation,
 						Snapshot snapshot,
 						int nkeys, ScanKey key,
 						bool allow_strat, bool allow_sync,
-						bool is_bitmapscan, bool temp_snap);
+						bool is_bitmapscan, bool temp_snap,
+						bool allow_prune);
 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
 					TransactionId xid, CommandId cid, int options);
 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
@@ -342,9 +343,11 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
 	snapshot = scan->rs_snapshot;
 
 	/*
-	 * Prune and repair fragmentation for the whole page, if possible.
+	 * Prune and repair fragmentation for the whole page, if possible and
+	 * enabled.
 	 */
-	heap_page_prune_opt(scan->rs_rd, buffer);
+	if (scan->rs_allow_prune)
+		heap_page_prune_opt(scan->rs_rd, buffer);
 
 	/*
 	 * We must hold share lock on the buffer content while examining tuple
@@ -1275,7 +1278,8 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode,
  * heap_beginscan_strat offers an extended API that lets the caller control
  * whether a nondefault buffer access strategy can be used, and whether
  * syncscan can be chosen (possibly resulting in the scan not starting from
- * block zero).  Both of these default to TRUE with plain heap_beginscan.
+ * block zero).  Both of these default to TRUE with plain heap_beginscan,
+ * while allow_prune defaults to FALSE.
  *
  * heap_beginscan_bm is an alternative entry point for setting up a
  * HeapScanDesc for a bitmap heap scan.  Although that scan technology is
@@ -1288,7 +1292,7 @@ heap_beginscan(Relation relation, Snapshot snapshot,
 			   int nkeys, ScanKey key)
 {
 	return heap_beginscan_internal(relation, snapshot, nkeys, key,
-								   true, true, false, false);
+								   true, true, false, false, false);
 }
 
 HeapScanDesc
@@ -1298,31 +1302,33 @@ heap_beginscan_catalog(Relation relation, int nkeys, ScanKey key)
 	Snapshot	snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
 
 	return heap_beginscan_internal(relation, snapshot, nkeys, key,
-								   true, true, false, true);
+								   true, true, false, true, !RecoveryInProgress());
 }
 
 HeapScanDesc
 heap_beginscan_strat(Relation relation, Snapshot snapshot,
 					 int nkeys, ScanKey key,
-					 bool allow_strat, bool allow_sync)
+					 bool allow_strat, bool allow_sync, bool allow_prune)
 {
 	return heap_beginscan_internal(relation, snapshot, nkeys, key,
-								   allow_strat, allow_sync, false, false);
+								   allow_strat, allow_sync, false, false,
+								   allow_prune);
 }
 
 HeapScanDesc
 heap_beginscan_bm(Relation relation, Snapshot snapshot,
-				  int nkeys, ScanKey key)
+				  int nkeys, ScanKey key, bool allow_prune)
 {
 	return heap_beginscan_internal(relation, snapshot, nkeys, key,
-								   false, false, true, false);
+								   false, false, true, false, allow_prune);
 }
 
 static HeapScanDesc
 heap_beginscan_internal(Relation relation, Snapshot snapshot,
 						int nkeys, ScanKey key,
 						bool allow_strat, bool allow_sync,
-						bool is_bitmapscan, bool temp_snap)
+						bool is_bitmapscan, bool temp_snap,
+						bool allow_prune)
 {
 	HeapScanDesc scan;
 
@@ -1348,6 +1354,7 @@ heap_beginscan_internal(Relation relation, Snapshot snapshot,
 	scan->rs_allow_strat = allow_strat;
 	scan->rs_allow_sync = allow_sync;
 	scan->rs_temp_snap = temp_snap;
+	scan->rs_allow_prune = allow_prune;
 
 	/*
 	 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 06b5488..8c244db 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -82,8 +82,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 	 * clean the page. The master will likely issue a cleaning WAL record soon
 	 * anyway, so this is no particular loss.
 	 */
-	if (RecoveryInProgress())
-		return;
+	Assert(!RecoveryInProgress());
 
 	/*
 	 * Use the appropriate xmin horizon for this relation. If it's a proper
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 850008b..2163056 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -317,7 +317,7 @@ systable_beginscan(Relation heapRelation,
 		 */
 		sysscan->scan = heap_beginscan_strat(heapRelation, snapshot,
 											 nkeys, key,
-											 true, false);
+											 true, false, false);
 		sysscan->iscan = NULL;
 	}
 
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 53cf96f..52d716b 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -249,10 +249,39 @@ index_beginscan(Relation heapRelation,
 
 	/*
 	 * Save additional parameters into the scandesc.  Everything else was set
+	 * up by RelationGetIndexScan. Disable pruning in all cases.
+	 */
+	scan->heapRelation = heapRelation;
+	scan->xs_snapshot = snapshot;
+	scan->xs_allow_prune = false;
+
+	return scan;
+}
+
+/*
+ * index_beginscan_strat - start a scan of an index with amgettuple
+ * allowing caller to specify additional scan strategies.
+ *
+ * Caller must be holding suitable locks on the heap and the index.
+ */
+IndexScanDesc
+index_beginscan_strat(Relation heapRelation,
+				Relation indexRelation,
+				Snapshot snapshot,
+				int nkeys, int norderbys,
+				bool allow_prune)
+{
+	IndexScanDesc scan;
+
+	scan = index_beginscan_internal(indexRelation, nkeys, norderbys, snapshot);
+
+	/*
+	 * Save additional parameters into the scandesc.  Everything else was set
 	 * up by RelationGetIndexScan.
 	 */
 	scan->heapRelation = heapRelation;
 	scan->xs_snapshot = snapshot;
+	scan->xs_allow_prune = allow_prune;
 
 	return scan;
 }
@@ -278,6 +307,9 @@ index_beginscan_bitmap(Relation indexRelation,
 	 */
 	scan->xs_snapshot = snapshot;
 
+	/* BitmapHeapScan does pruning if required */
+	scan->xs_allow_prune = false;
+
 	return scan;
 }
 
@@ -520,9 +552,9 @@ index_fetch_heap(IndexScanDesc scan)
 											 ItemPointerGetBlockNumber(tid));
 
 		/*
-		 * Prune page, but only if we weren't already on this page
+		 * Prune page if enabled, but only if we weren't already on this page
 		 */
-		if (prev_buf != scan->xs_cbuf)
+		if (prev_buf != scan->xs_cbuf && scan->xs_allow_prune)
 			heap_page_prune_opt(scan->heapRelation, scan->xs_cbuf);
 	}
 
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index ee10594..beb7fc1 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -2165,7 +2165,8 @@ IndexBuildHeapScan(Relation heapRelation,
 								0,		/* number of keys */
 								NULL,	/* scan key */
 								true,	/* buffer access strategy OK */
-								allow_sync);	/* syncscan OK? */
+								allow_sync,	/* syncscan OK? */
+								false);		/* disallow pruning */
 
 	reltuples = 0;
 
@@ -2543,7 +2544,8 @@ IndexCheckExclusion(Relation heapRelation,
 								0,		/* number of keys */
 								NULL,	/* scan key */
 								true,	/* buffer access strategy OK */
-								true);	/* syncscan OK */
+								true,	/* syncscan OK */
+								false);		/* disallow pruning */
 
 	while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 	{
@@ -2817,7 +2819,8 @@ validate_index_heapscan(Relation heapRelation,
 								0,		/* number of keys */
 								NULL,	/* scan key */
 								true,	/* buffer access strategy OK */
-								false); /* syncscan not OK */
+								false, /* syncscan not OK */
+								false);		/* disallow pruning */
 
 	/*
 	 * Scan all tuples matching the snapshot.
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 9b1e975..8f9b122 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -334,9 +334,11 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
 	ntup = 0;
 
 	/*
-	 * Prune and repair fragmentation for the whole page, if possible.
+	 * Prune and repair fragmentation for the whole page, if possible and
+	 * allowed.
 	 */
-	heap_page_prune_opt(scan->rs_rd, buffer);
+	if (scan->rs_allow_prune)
+		heap_page_prune_opt(scan->rs_rd, buffer);
 
 	/*
 	 * We must hold share lock on the buffer content while examining tuple
@@ -537,6 +539,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 {
 	BitmapHeapScanState *scanstate;
 	Relation	currentRelation;
+	bool		relistarget;
 
 	/* check for unsupported flags */
 	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
@@ -597,6 +600,7 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
 
 	scanstate->ss.ss_currentRelation = currentRelation;
+	relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
 
 	/*
 	 * Even though we aren't going to do a conventional seqscan, it is useful
@@ -605,7 +609,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 	scanstate->ss.ss_currentScanDesc = heap_beginscan_bm(currentRelation,
 														 estate->es_snapshot,
 														 0,
-														 NULL);
+														 NULL,
+														 relistarget);
 
 	/*
 	 * get the scan type from the relation descriptor.
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index afcd1ff..b154823 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -523,11 +523,12 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
 	/*
 	 * Initialize scan descriptor.
 	 */
-	indexstate->ioss_ScanDesc = index_beginscan(currentRelation,
+	indexstate->ioss_ScanDesc = index_beginscan_strat(currentRelation,
 												indexstate->ioss_RelationDesc,
 												estate->es_snapshot,
 												indexstate->ioss_NumScanKeys,
-											indexstate->ioss_NumOrderByKeys);
+											indexstate->ioss_NumOrderByKeys,
+											relistarget);
 
 	/* Set it up for index-only scan */
 	indexstate->ioss_ScanDesc->xs_want_itup = true;
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 2b89dc6..3512f50 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -603,11 +603,12 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
 	/*
 	 * Initialize scan descriptor.
 	 */
-	indexstate->iss_ScanDesc = index_beginscan(currentRelation,
+	indexstate->iss_ScanDesc = index_beginscan_strat(currentRelation,
 											   indexstate->iss_RelationDesc,
 											   estate->es_snapshot,
 											   indexstate->iss_NumScanKeys,
-											 indexstate->iss_NumOrderByKeys);
+											   indexstate->iss_NumOrderByKeys,
+											   relistarget);
 
 	/*
 	 * If no run-time keys to calculate, go ahead and pass the scankeys to the
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index ab13e47..7240413 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -126,20 +126,30 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags)
 {
 	Relation	currentRelation;
 	HeapScanDesc currentScanDesc;
+	Oid			currentRelid;
+	bool		relistarget;
+
+	/*
+	 * get the relation object id from the relid'th entry in the range table
+	 */
+	currentRelid = ((SeqScan *) node->ps.plan)->scanrelid;
+	relistarget = ExecRelationIsTargetRelation(estate, currentRelid);
 
 	/*
-	 * get the relation object id from the relid'th entry in the range table,
 	 * open that relation and acquire appropriate lock on it.
 	 */
 	currentRelation = ExecOpenScanRelation(estate,
-									  ((SeqScan *) node->ps.plan)->scanrelid,
+										   currentRelid,
 										   eflags);
 
 	/* initialize a heapscan */
-	currentScanDesc = heap_beginscan(currentRelation,
+	currentScanDesc = heap_beginscan_strat(currentRelation,
 									 estate->es_snapshot,
 									 0,
-									 NULL);
+									 NULL,
+									 true,
+									 true,
+									 relistarget);
 
 	node->ss_currentRelation = currentRelation;
 	node->ss_currentScanDesc = currentScanDesc;
diff --git a/src/include/access/genam.h b/src/include/access/genam.h
index d99158f..eb993ae 100644
--- a/src/include/access/genam.h
+++ b/src/include/access/genam.h
@@ -135,6 +135,10 @@ extern IndexScanDesc index_beginscan(Relation heapRelation,
 				Relation indexRelation,
 				Snapshot snapshot,
 				int nkeys, int norderbys);
+extern IndexScanDesc index_beginscan_strat(Relation heapRelation,
+				Relation indexRelation,
+				Snapshot snapshot,
+				int nkeys, int norderbys, bool allow_prune);
 extern IndexScanDesc index_beginscan_bitmap(Relation indexRelation,
 					   Snapshot snapshot,
 					   int nkeys);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 493839f..3eea50c 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -109,9 +109,9 @@ extern HeapScanDesc heap_beginscan_catalog(Relation relation, int nkeys,
 					   ScanKey key);
 extern HeapScanDesc heap_beginscan_strat(Relation relation, Snapshot snapshot,
 					 int nkeys, ScanKey key,
-					 bool allow_strat, bool allow_sync);
+					 bool allow_strat, bool allow_sync, bool allow_prune);
 extern HeapScanDesc heap_beginscan_bm(Relation relation, Snapshot snapshot,
-				  int nkeys, ScanKey key);
+				  int nkeys, ScanKey key, bool allow_prune);
 extern void heap_rescan(HeapScanDesc scan, ScanKey key);
 extern void heap_endscan(HeapScanDesc scan);
 extern HeapTuple heap_getnext(HeapScanDesc scan, ScanDirection direction);
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 8a57698..815768f 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -33,6 +33,7 @@ typedef struct HeapScanDescData
 	bool		rs_allow_strat; /* allow or disallow use of access strategy */
 	bool		rs_allow_sync;	/* allow or disallow use of syncscan */
 	bool		rs_temp_snap;	/* unregister snapshot at scan end? */
+	bool		rs_allow_prune;	/* HOT-prune during this scan; do not use in Hot Standby */
 
 	/* state set up at initscan time */
 	BlockNumber rs_nblocks;		/* number of blocks to scan */
@@ -71,6 +72,7 @@ typedef struct IndexScanDescData
 	ScanKey		keyData;		/* array of index qualifier descriptors */
 	ScanKey		orderByData;	/* array of ordering op descriptors */
 	bool		xs_want_itup;	/* caller requests index tuples */
+	bool		xs_allow_prune;	/* HOT-prune during this scan; do not use in Hot Standby */
 
 	/* signaling to index AM about killing index tuples */
 	bool		kill_prior_tuple;		/* last-returned tuple is dead */
