diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index bcf9871..4e913bd 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -63,6 +63,7 @@
 #include "storage/predicate.h"
 #include "storage/procarray.h"
 #include "storage/smgr.h"
+#include "storage/spin.h"
 #include "storage/standby.h"
 #include "utils/datum.h"
 #include "utils/inval.h"
@@ -80,12 +81,15 @@ bool		synchronize_seqscans = true;
 static HeapScanDesc heap_beginscan_internal(Relation relation,
 						Snapshot snapshot,
 						int nkeys, ScanKey key,
+						ParallelHeapScanDesc parallel_scan,
 						bool allow_strat,
 						bool allow_sync,
 						bool allow_pagemode,
 						bool is_bitmapscan,
 						bool is_samplescan,
 						bool temp_snap);
+static BlockNumber heap_parallelscan_nextpage(HeapScanDesc scan);
+static void heap_parallelscan_initialize_startblock(HeapScanDesc scan);
 static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
 					TransactionId xid, CommandId cid, int options);
 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
@@ -226,16 +230,21 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
 	 * results for a non-MVCC snapshot, the caller must hold some higher-level
 	 * lock that ensures the interesting tuple(s) won't change.)
 	 */
-	scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
+	if (scan->rs_parallel != NULL)
+		scan->rs_nblocks = scan->rs_parallel->phs_nblocks;
+	else
+		scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
 
 	/*
 	 * If the table is large relative to NBuffers, use a bulk-read access
-	 * strategy and enable synchronized scanning (see syncscan.c).  Although
-	 * the thresholds for these features could be different, we make them the
-	 * same so that there are only two behaviors to tune rather than four.
-	 * (However, some callers need to be able to disable one or both of these
-	 * behaviors, independently of the size of the table; also there is a GUC
-	 * variable that can disable synchronized scanning.)
+	 * strategy and enable synchronized scanning (see syncscan.c; if the
+	 * conditions for enabling synchronized scanning are changed here, make
+	 * the same change in heap_parallelscan_initialize()).  Although the
+	 * thresholds for these features could be different, we make them the
+	 * same so that there are only two behaviors to tune rather than four.
+	 * (However, some callers need to be able to disable one or both of
+	 * these behaviors, independently of the size of the table; also there
+	 * is a GUC variable that can disable synchronized scanning.)
 	 *
 	 * During a rescan, don't make a new strategy object if we don't have to.
 	 */
@@ -272,7 +281,10 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
 	else if (allow_sync && synchronize_seqscans)
 	{
 		scan->rs_syncscan = true;
-		scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
+		if (scan->rs_parallel != NULL)
+			heap_parallelscan_initialize_startblock(scan);
+		else
+			scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
 	}
 	else
 	{
@@ -496,7 +508,25 @@ heapgettup(HeapScanDesc scan,
 				tuple->t_data = NULL;
 				return;
 			}
-			page = scan->rs_startblock; /* first page */
+			if (scan->rs_parallel != NULL)
+			{
+				page = heap_parallelscan_nextpage(scan);
+
+				/*
+				 * Return NULL if the scan is already finished.  By the time
+				 * one worker gets here, the others may have scanned the
+				 * entire relation, in which case there is nothing left for
+				 * this worker to do.
+				 */
+				if (page == InvalidBlockNumber)
+				{
+					Assert(!BufferIsValid(scan->rs_cbuf));
+					tuple->t_data = NULL;
+					return;
+				}
+			}
+			else
+				page = scan->rs_startblock;		/* first page */
 			heapgetpage(scan, page);
 			lineoff = FirstOffsetNumber;		/* first offnum */
 			scan->rs_inited = true;
@@ -519,6 +549,9 @@ heapgettup(HeapScanDesc scan,
 	}
 	else if (backward)
 	{
+		/* backward parallel scan not supported */
+		Assert(scan->rs_parallel == NULL);
+
 		if (!scan->rs_inited)
 		{
 			/*
@@ -671,11 +704,20 @@ heapgettup(HeapScanDesc scan,
 		}
 		else
 		{
-			page++;
-			if (page >= scan->rs_nblocks)
-				page = 0;
-			finished = (page == scan->rs_startblock) ||
-				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
+			if (scan->rs_parallel != NULL)
+			{
+				page = heap_parallelscan_nextpage(scan);
+				finished = (page == InvalidBlockNumber);
+			}
+			else
+			{
+				page++;
+				if (page >= scan->rs_nblocks)
+					page = 0;
+
+				finished = (page == scan->rs_startblock) ||
+					(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
+			}
 
 			/*
 			 * Report our new scan position for synchronization purposes. We
@@ -773,7 +815,25 @@ heapgettup_pagemode(HeapScanDesc scan,
 				tuple->t_data = NULL;
 				return;
 			}
-			page = scan->rs_startblock; /* first page */
+			if (scan->rs_parallel != NULL)
+			{
+				page = heap_parallelscan_nextpage(scan);
+
+				/*
+				 * Return NULL if the scan is already finished.  By the time
+				 * one worker gets here, the others may have scanned the
+				 * entire relation, in which case there is nothing left for
+				 * this worker to do.
+				 */
+				if (page == InvalidBlockNumber)
+				{
+					Assert(!BufferIsValid(scan->rs_cbuf));
+					tuple->t_data = NULL;
+					return;
+				}
+			}
+			else
+				page = scan->rs_startblock;		/* first page */
 			heapgetpage(scan, page);
 			lineindex = 0;
 			scan->rs_inited = true;
@@ -793,6 +853,9 @@ heapgettup_pagemode(HeapScanDesc scan,
 	}
 	else if (backward)
 	{
+		/* backward parallel scan not supported */
+		Assert(scan->rs_parallel == NULL);
+
 		if (!scan->rs_inited)
 		{
 			/*
@@ -934,11 +997,20 @@ heapgettup_pagemode(HeapScanDesc scan,
 		}
 		else
 		{
-			page++;
-			if (page >= scan->rs_nblocks)
-				page = 0;
-			finished = (page == scan->rs_startblock) ||
-				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
+			if (scan->rs_parallel != NULL)
+			{
+				page = heap_parallelscan_nextpage(scan);
+				finished = (page == InvalidBlockNumber);
+			}
+			else
+			{
+				page++;
+				if (page >= scan->rs_nblocks)
+					page = 0;
+
+				finished = (page == scan->rs_startblock) ||
+					(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
+			}
 
 			/*
 			 * Report our new scan position for synchronization purposes. We
@@ -1341,7 +1413,7 @@ HeapScanDesc
 heap_beginscan(Relation relation, Snapshot snapshot,
 			   int nkeys, ScanKey key)
 {
-	return heap_beginscan_internal(relation, snapshot, nkeys, key,
+	return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
 								   true, true, true, false, false, false);
 }
 
@@ -1351,7 +1423,7 @@ heap_beginscan_catalog(Relation relation, int nkeys, ScanKey key)
 	Oid			relid = RelationGetRelid(relation);
 	Snapshot	snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
 
-	return heap_beginscan_internal(relation, snapshot, nkeys, key,
+	return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
 								   true, true, true, false, false, true);
 }
 
@@ -1360,7 +1432,7 @@ heap_beginscan_strat(Relation relation, Snapshot snapshot,
 					 int nkeys, ScanKey key,
 					 bool allow_strat, bool allow_sync)
 {
-	return heap_beginscan_internal(relation, snapshot, nkeys, key,
+	return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
 								   allow_strat, allow_sync, true,
 								   false, false, false);
 }
@@ -1369,7 +1441,7 @@ HeapScanDesc
 heap_beginscan_bm(Relation relation, Snapshot snapshot,
 				  int nkeys, ScanKey key)
 {
-	return heap_beginscan_internal(relation, snapshot, nkeys, key,
+	return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
 								   false, false, true, true, false, false);
 }
 
@@ -1378,7 +1450,7 @@ heap_beginscan_sampling(Relation relation, Snapshot snapshot,
 						int nkeys, ScanKey key,
 					  bool allow_strat, bool allow_sync, bool allow_pagemode)
 {
-	return heap_beginscan_internal(relation, snapshot, nkeys, key,
+	return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
 								   allow_strat, allow_sync, allow_pagemode,
 								   false, true, false);
 }
@@ -1386,6 +1458,7 @@ heap_beginscan_sampling(Relation relation, Snapshot snapshot,
 static HeapScanDesc
 heap_beginscan_internal(Relation relation, Snapshot snapshot,
 						int nkeys, ScanKey key,
+						ParallelHeapScanDesc parallel_scan,
 						bool allow_strat,
 						bool allow_sync,
 						bool allow_pagemode,
@@ -1418,6 +1491,7 @@ heap_beginscan_internal(Relation relation, Snapshot snapshot,
 	scan->rs_allow_strat = allow_strat;
 	scan->rs_allow_sync = allow_sync;
 	scan->rs_temp_snap = temp_snap;
+	scan->rs_parallel = parallel_scan;
 
 	/*
 	 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
@@ -1452,6 +1526,13 @@ heap_beginscan_internal(Relation relation, Snapshot snapshot,
 
 	initscan(scan, key, false);
 
+	/*
+	 * Ensure that all backends participating in a parallel scan use the
+	 * same syncscan setting, namely the one established in
+	 * heap_parallelscan_initialize().
+	 */
+	if (parallel_scan != NULL)
+		scan->rs_syncscan = parallel_scan->phs_syncscan;
+
 	return scan;
 }
 
@@ -1532,6 +1613,165 @@ heap_endscan(HeapScanDesc scan)
 }
 
 /* ----------------
+ *		heap_parallelscan_estimate - estimate storage for ParallelHeapScanDesc
+ *
+ *		Sadly, this doesn't reduce to a constant, because the size required
+ *		to serialize the snapshot can vary.
+ * ----------------
+ */
+Size
+heap_parallelscan_estimate(Snapshot snapshot)
+{
+	return add_size(offsetof(ParallelHeapScanDescData, phs_snapshot_data),
+					EstimateSnapshotSpace(snapshot));
+}
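+
+/*
+ * For example, a caller sizing a shared-memory request for the descriptor
+ * might do the following (a sketch only; it assumes a ParallelContext
+ * "pcxt" and uses the shm_toc machinery, which is just one way to carve
+ * up a dynamic shared memory segment):
+ *
+ *		Size		size = heap_parallelscan_estimate(snapshot);
+ *
+ *		shm_toc_estimate_chunk(&pcxt->estimator, size);
+ *		shm_toc_estimate_keys(&pcxt->estimator, 1);
+ */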
+
+/* ----------------
+ *		heap_parallelscan_initialize - initialize ParallelHeapScanDesc
+ *
+ *		Must allow as many bytes of shared memory as returned by
+ *		heap_parallelscan_estimate.  Call this just once in the leader
+ *		process; then, individual workers attach via heap_beginscan_parallel.
+ * ----------------
+ */
+void
+heap_parallelscan_initialize(ParallelHeapScanDesc target, Relation relation,
+							 Snapshot snapshot, bool allow_sync)
+{
+	target->phs_relid = RelationGetRelid(relation);
+	target->phs_nblocks = RelationGetNumberOfBlocks(relation);
+	SpinLockInit(&target->phs_mutex);
+	target->phs_cblock = InvalidBlockNumber;
+	target->phs_startblock = InvalidBlockNumber;
+
+	/*
+	 * If the table is large relative to NBuffers, enable synchronized
+	 * scanning (see syncscan.c).  This test must match the corresponding
+	 * logic in initscan().
+	 */
+	target->phs_syncscan = allow_sync && synchronize_seqscans &&
+		!RelationUsesLocalBuffers(relation) &&
+		target->phs_nblocks > NBuffers / 4;
+
+	SerializeSnapshot(snapshot, target->phs_snapshot_data);
+}
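+
+/*
+ * A minimal leader-side sketch (assuming a shm_toc named "toc" in a dynamic
+ * shared memory segment, with PARALLEL_KEY_SCAN as a caller-defined toc key;
+ * error handling omitted):
+ *
+ *		Size		size = heap_parallelscan_estimate(snapshot);
+ *		ParallelHeapScanDesc pscan = shm_toc_allocate(toc, size);
+ *
+ *		heap_parallelscan_initialize(pscan, relation, snapshot, true);
+ *		shm_toc_insert(toc, PARALLEL_KEY_SCAN, pscan);
+ */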
+
+/* ----------------
+ *		heap_parallelscan_initialize_startblock - initialize the startblock for
+ *					parallel scan.
+ *
+ *		Only the first backend to arrive here chooses the starting block;
+ *		all participants then rely on that shared value to detect when the
+ *		scan has wrapped back around to its starting point and is complete.
+ * ----------------
+ */
+static void
+heap_parallelscan_initialize_startblock(HeapScanDesc scan)
+{
+	ParallelHeapScanDesc parallel_scan;
+	BlockNumber page;
+
+	Assert(scan->rs_parallel);
+
+	parallel_scan = scan->rs_parallel;
+
+	SpinLockAcquire(&parallel_scan->phs_mutex);
+	page = parallel_scan->phs_startblock;
+	SpinLockRelease(&parallel_scan->phs_mutex);
+
+	if (page != InvalidBlockNumber)
+		return;					/* some other process already did this */
+
+	page = ss_get_location(scan->rs_rd, scan->rs_nblocks);
+
+	SpinLockAcquire(&parallel_scan->phs_mutex);
+	/* even though we checked before, someone might have beaten us here */
+	if (parallel_scan->phs_startblock == InvalidBlockNumber)
+	{
+		parallel_scan->phs_startblock = page;
+		parallel_scan->phs_cblock = page;
+	}
+	SpinLockRelease(&parallel_scan->phs_mutex);
+}
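+
+/*
+ * A note on the pattern above: ss_get_location() acquires an LWLock
+ * internally, and LWLocks must not be taken while holding a spinlock, so
+ * the mutex is released before calling it.  That leaves a window in which
+ * another backend may set phs_startblock first, hence the recheck under
+ * the mutex; whichever value is stored first wins.
+ */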
+
+/* ----------------
+ *		heap_parallelscan_nextpage - get the next page to scan
+ *
+ *		The scan is complete once the shared block position wraps back
+ *		around to where it started.  Note, however, that other backends
+ *		could still be scanning if they grabbed a page to scan and aren't
+ *		done with it yet.  If advancing the position would move it past the
+ *		last page of the relation, it is reset to the beginning of the
+ *		relation.
+ *
+ *		A return value of InvalidBlockNumber indicates the end of the scan.
+ * ----------------
+ */
+static BlockNumber
+heap_parallelscan_nextpage(HeapScanDesc scan)
+{
+	BlockNumber page = InvalidBlockNumber;
+	ParallelHeapScanDesc parallel_scan;
+	bool		report_scan_done = false;
+
+	Assert(scan->rs_parallel);
+
+	parallel_scan = scan->rs_parallel;
+
+	SpinLockAcquire(&parallel_scan->phs_mutex);
+	page = parallel_scan->phs_cblock;
+	if (page != InvalidBlockNumber)
+	{
+		parallel_scan->phs_cblock++;
+		if (parallel_scan->phs_cblock >= scan->rs_nblocks)
+			parallel_scan->phs_cblock = 0;
+		if (parallel_scan->phs_cblock == parallel_scan->phs_startblock)
+		{
+			parallel_scan->phs_cblock = InvalidBlockNumber;
+			report_scan_done = true;
+		}
+	}
+	SpinLockRelease(&parallel_scan->phs_mutex);
+
+	/*
+	 * Report the scan position once, from whichever backend first observes
+	 * the end of the scan, so that the final state of the position hint is
+	 * back at the start of the relation.
+	 */
+	if (report_scan_done && scan->rs_syncscan)
+		ss_report_location(scan->rs_rd, page);
+
+	return page;
+}
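+
+/*
+ * A worked example of the wraparound logic above: with rs_nblocks = 8 and
+ * phs_startblock = 5, pages are handed out in the order 5, 6, 7, 0, 1, 2,
+ * 3, 4.  The caller that receives page 4 advances phs_cblock back to the
+ * start block, so it stores InvalidBlockNumber and (for a synchronized
+ * scan) reports the final position; every subsequent caller then sees
+ * InvalidBlockNumber and returns immediately.
+ */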
+
+/* ----------------
+ *		heap_beginscan_parallel - join a parallel scan
+ *
+ *		Caller must hold a suitable lock on the correct relation.
+ * ----------------
+ */
+HeapScanDesc
+heap_beginscan_parallel(Relation relation, ParallelHeapScanDesc parallel_scan)
+{
+	Snapshot	snapshot;
+
+	Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
+	snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data);
+	RegisterSnapshot(snapshot);
+
+	return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan,
+								   true, true, true, false, false, true);
+}
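+
+/*
+ * A worker-side sketch (assuming "pscan" was located in shared memory,
+ * e.g. via shm_toc_lookup, and that process_tuple() is a hypothetical
+ * stand-in for the real per-tuple work):
+ *
+ *		HeapScanDesc scan = heap_beginscan_parallel(relation, pscan);
+ *		HeapTuple	tuple;
+ *
+ *		while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
+ *			process_tuple(tuple);
+ *		heap_endscan(scan);
+ */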
+
+/* ----------------
  *		heap_getnext	- retrieve next tuple in scan
  *
  *		Fix to work with index relations.
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 75e6b72..98a586d 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -96,8 +96,9 @@ extern Relation heap_openrv_extended(const RangeVar *relation,
 
 #define heap_close(r,l)  relation_close(r,l)
 
-/* struct definition appears in relscan.h */
+/* struct definitions appear in relscan.h */
 typedef struct HeapScanDescData *HeapScanDesc;
+typedef struct ParallelHeapScanDescData *ParallelHeapScanDesc;
 
 /*
  * HeapScanIsValid
@@ -126,6 +127,12 @@ extern void heap_rescan_set_params(HeapScanDesc scan, ScanKey key,
 extern void heap_endscan(HeapScanDesc scan);
 extern HeapTuple heap_getnext(HeapScanDesc scan, ScanDirection direction);
 
+extern Size heap_parallelscan_estimate(Snapshot snapshot);
+extern void heap_parallelscan_initialize(ParallelHeapScanDesc target,
+							 Relation relation, Snapshot snapshot,
+							 bool allow_sync);
+extern HeapScanDesc heap_beginscan_parallel(Relation, ParallelHeapScanDesc);
+
 extern bool heap_fetch(Relation relation, Snapshot snapshot,
 		   HeapTuple tuple, Buffer *userbuf, bool keep_buf,
 		   Relation stats_relation);
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 6e62319..ecc6934 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -20,6 +20,17 @@
 #include "access/itup.h"
 #include "access/tupdesc.h"
 
+/*
+ * Struct for parallel scan setup.  This structure lives in shared memory;
+ * because the serialized snapshot is variable-length, it must be allocated
+ * with the size returned by heap_parallelscan_estimate(), not sizeof().
+ */
+typedef struct ParallelHeapScanDescData
+{
+	Oid			phs_relid;		/* OID of relation to scan */
+	BlockNumber phs_nblocks;	/* # blocks in relation at start of scan */
+	slock_t		phs_mutex;		/* mutual exclusion for block number fields */
+	BlockNumber phs_cblock;		/* current block number */
+	BlockNumber phs_startblock; /* starting block number */
+	bool		phs_syncscan;	/* report location to syncscan logic? */
+	char		phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER];	/* serialized snapshot */
+}	ParallelHeapScanDescData;
 
 typedef struct HeapScanDescData
 {
@@ -49,6 +60,7 @@ typedef struct HeapScanDescData
 	BlockNumber rs_cblock;		/* current block # in scan, if any */
 	Buffer		rs_cbuf;		/* current buffer in scan, if any */
 	/* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
+	ParallelHeapScanDesc rs_parallel;	/* parallel scan information */
 
 	/* these fields only used in page-at-a-time mode and for bitmap scans */
 	int			rs_cindex;		/* current tuple's index in vistuples */
