diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index b2d1901..81422af 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -3841,6 +3841,44 @@ recheck_xvac:
 	return changed;
 }
 
+/*
+ * heap_tuple_needs_freeze
+ *
+ * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
+ * are older than the specified cutoff XID.  If so, return TRUE.
+ *
+ * It doesn't matter whether the tuple is alive or dead; we are checking
+ * whether the tuple needs to be removed or frozen to avoid wraparound.
+ */
+bool
+heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
+				  Buffer buf)
+{
+	TransactionId xid;
+
+	xid = HeapTupleHeaderGetXmin(tuple);
+	if (TransactionIdIsNormal(xid) &&
+		TransactionIdPrecedes(xid, cutoff_xid))
+		return true;
+
+	if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
+	{
+		xid = HeapTupleHeaderGetXmax(tuple);
+		if (TransactionIdIsNormal(xid) &&
+			TransactionIdPrecedes(xid, cutoff_xid))
+			return true;
+	}
+
+	if (tuple->t_infomask & HEAP_MOVED)
+	{
+		xid = HeapTupleHeaderGetXvac(tuple);
+		if (TransactionIdIsNormal(xid) &&
+			TransactionIdPrecedes(xid, cutoff_xid))
+			return true;
+	}
+
+	return false;
+}
 
 /* ----------------
  *		heap_markpos	- mark scan position
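
Side note on the XID arithmetic that heap_tuple_needs_freeze() depends on:
XIDs are 32-bit counters compared circularly (modulo 2^32), so "precedes the
cutoff" is only meaningful within about two billion transactions; that is why
tuples bearing old XIDs must eventually be frozen.  A minimal standalone
sketch of the comparison rule follows.  It models TransactionIdPrecedes() for
two normal XIDs and is not part of the patch; the real function special-cases
permanent XIDs, which this sketch ignores.

#include <stdint.h>
#include <stdio.h>

/*
 * Circular XID comparison: "a precedes b" iff the signed 32-bit
 * difference is negative.  This mirrors how TransactionIdPrecedes()
 * treats two normal XIDs.
 */
static int
xid_precedes(uint32_t a, uint32_t b)
{
	return (int32_t) (a - b) < 0;
}

int
main(void)
{
	uint32_t	cutoff = 100;

	/* An XID numerically below the cutoff precedes it, as expected. */
	printf("50 vs 100: %d\n", xid_precedes(50, cutoff));	/* prints 1 */

	/*
	 * Because the comparison is circular, a numerically huge XID can
	 * also precede a small one: after wraparound, unfrozen old XIDs
	 * would suddenly appear to be "in the future", which is exactly
	 * the failure that freezing prevents.
	 */
	printf("4000000000 vs 100: %d\n",
		   xid_precedes(4000000000u, cutoff));	/* prints 1 */
	return 0;
}
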
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index f42504c..d08ac65 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -1054,7 +1054,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound)
 					vacstmt->freeze_min_age, vacstmt->freeze_table_age);
 	}
 	else
-		lazy_vacuum_rel(onerel, vacstmt, vac_strategy);
+		lazy_vacuum_rel(onerel, vacstmt, vac_strategy, for_wraparound);
 
 	/* Roll back any GUC changes executed by index functions */
 	AtEOXact_GUC(false, save_nestlevel);
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index b197b45..8a09a01 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -115,8 +115,10 @@ static BufferAccessStrategy vac_strategy;
 
 /* non-export function prototypes */
 static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
-			   Relation *Irel, int nindexes, bool scan_all);
+			   Relation *Irel, int nindexes, bool scan_all,
+			   bool for_wraparound);
 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
+static bool lazy_scan_page_for_wraparound(Buffer buf);
 static void lazy_vacuum_index(Relation indrel,
 				  IndexBulkDeleteResult **stats,
 				  LVRelStats *vacrelstats);
@@ -146,7 +148,7 @@ static int	vac_cmp_itemptr(const void *left, const void *right);
  */
 void
 lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
-				BufferAccessStrategy bstrategy)
+				BufferAccessStrategy bstrategy, bool for_wraparound)
 {
 	LVRelStats *vacrelstats;
 	Relation   *Irel;
@@ -193,7 +195,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
 	vacrelstats->hasindex = (nindexes > 0);
 
 	/* Do the vacuuming */
-	lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, scan_all);
+	lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, scan_all,
+				   for_wraparound);
 
 	/* Done with indexes */
 	vac_close_indexes(nindexes, Irel, NoLock);
@@ -327,7 +330,7 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
  */
 static void
 lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
-			   Relation *Irel, int nindexes, bool scan_all)
+			   Relation *Irel, int nindexes, bool scan_all, bool for_wraparound)
 {
 	BlockNumber nblocks,
 				blkno;
@@ -453,8 +456,6 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 
 		vacuum_delay_point();
 
-		vacrelstats->scanned_pages++;
-
 		/*
 		 * If we are close to overrunning the available space for dead-tuple
 		 * TIDs, pause and do a cycle of vacuuming before we tackle this page.
@@ -486,7 +487,41 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 								 RBM_NORMAL, vac_strategy);
 
 		/* We need buffer cleanup lock so that we can prune HOT chains. */
-		LockBufferForCleanup(buf);
+		if (!ConditionalLockBufferForCleanup(buf))
+		{
+			/*
+			 * It's OK to skip vacuuming a page, as long as it does not
+			 * contain data that needs to be cleaned to avoid wraparound.
+			 */
+			if (!for_wraparound)
+			{
+				ReleaseBuffer(buf);
+				continue;
+			}
+
+			/*
+			 * If this is a wraparound-checking vacuum, we read the page
+			 * with share lock to see if any xids need to be frozen. If
+			 * the page doesn't need attention we just skip it and
+			 * continue. If it does, we wait for the cleanup lock.
+			 *
+			 * We could defer the lock request further by remembering the page
+			 * and coming back to it later, or we could even register
+			 * ourselves for multiple buffers and then service whichever one
+			 * is received first.  For now, this seems good enough.
+			 */
+			LockBuffer(buf, BUFFER_LOCK_SHARE);
+			if (!lazy_scan_page_for_wraparound(buf))
+			{
+				UnlockReleaseBuffer(buf);
+				continue;
+			}
+			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+			LockBufferForCleanup(buf);
+			/* drop through to normal processing */
+		}
+
+		vacrelstats->scanned_pages++;
 
 		page = BufferGetPage(buf);
 
@@ -932,7 +967,13 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
-		LockBufferForCleanup(buf);
+		if (!ConditionalLockBufferForCleanup(buf))
+		{
+			/* can't get the cleanup lock; drop the pin and skip for now */
+			ReleaseBuffer(buf);
+			++tupindex;
+			continue;
+		}
 		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);
 
 		/* Now that we've compacted the page, record its available space */
@@ -1010,6 +1051,50 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 }
 
 /*
+ *	lazy_scan_page_for_wraparound() -- scan page to see if any tuples
+ *					 need to be frozen to avoid wraparound
+ *
+ * Returns true if the page needs to be vacuumed using cleanup lock.
+ */
+static bool
+lazy_scan_page_for_wraparound(Buffer buf)
+{
+	Page		page;
+	OffsetNumber offnum,
+				maxoff;
+	HeapTupleHeader tupleheader;
+
+	page = BufferGetPage(buf);
+
+	if (PageIsNew(page) || PageIsEmpty(page))
+	{
+		/* PageIsNew probably shouldn't happen... */
+		return false;
+	}
+
+	maxoff = PageGetMaxOffsetNumber(page);
+	for (offnum = FirstOffsetNumber;
+		 offnum <= maxoff;
+		 offnum = OffsetNumberNext(offnum))
+	{
+		ItemId		itemid;
+
+		itemid = PageGetItemId(page, offnum);
+
+		if (!ItemIdIsNormal(itemid))
+			continue;
+
+		tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
+
+		if (heap_tuple_needs_freeze(tupleheader, FreezeLimit, buf))
+			return true;
+	}						/* scan along page */
+
+	return false;
+}
+
+
+/*
  *	lazy_vacuum_index() -- vacuum one index relation.
  *
  *		Delete all the index entries pointing to tuples listed in
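
For illustration, here is a deliberately simplified standalone model of what
lazy_scan_page_for_wraparound() computes.  ToyTuple and page_has_old_xids()
are hypothetical names: the real function walks line pointers via
PageGetItemId() and delegates the per-tuple decision to
heap_tuple_needs_freeze(), which also considers multixact xmax and the xvac
field, none of which this toy version attempts.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_XID 0			/* stand-in for InvalidTransactionId */

/* Toy tuple holding just the XID fields this check cares about. */
typedef struct
{
	uint32_t	xmin;
	uint32_t	xmax;
} ToyTuple;

/* Circular comparison, as in the sketch after the heapam.c hunks above. */
static bool
xid_precedes(uint32_t a, uint32_t b)
{
	return (int32_t) (a - b) < 0;
}

/*
 * Model of the page scan: true if any tuple carries an XID older than
 * the cutoff, i.e. the page would be worth waiting on the cleanup lock.
 */
static bool
page_has_old_xids(const ToyTuple *tuples, int ntuples, uint32_t cutoff)
{
	for (int i = 0; i < ntuples; i++)
	{
		if (tuples[i].xmin != INVALID_XID &&
			xid_precedes(tuples[i].xmin, cutoff))
			return true;
		if (tuples[i].xmax != INVALID_XID &&
			xid_precedes(tuples[i].xmax, cutoff))
			return true;
	}
	return false;
}

int
main(void)
{
	ToyTuple	page[] = {{500, INVALID_XID}, {90, INVALID_XID}};

	/* The second tuple's xmin (90) precedes the cutoff (100). */
	printf("needs freeze: %d\n", page_has_old_xids(page, 2, 100));
	return 0;
}
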
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 776ea5c..85cbeb3 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -111,6 +111,8 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
 extern void heap_inplace_update(Relation relation, HeapTuple tuple);
 extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
 				  Buffer buf);
+extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
+				  Buffer buf);
 
 extern Oid	simple_heap_insert(Relation relation, HeapTuple tup);
 extern void simple_heap_delete(Relation relation, ItemPointer tid);
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index d8fd0ca..60f21f5 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -162,7 +162,7 @@ extern void vacuum_delay_point(void);
 
 /* in commands/vacuumlazy.c */
 extern void lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
-				BufferAccessStrategy bstrategy);
+				BufferAccessStrategy bstrategy, bool for_wraparound);
 
 /* in commands/analyze.c */
 extern void analyze_rel(Oid relid, VacuumStmt *vacstmt,
