diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 5b9289fd4b..f528b3e3a0 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -938,7 +938,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		OffsetNumber offnum,
 					maxoff;
 		bool		tupgone,
-					hastup;
+					hastup,
+					will_dirty = false;
+		int			tups_vacuumed_page = 0;
 		int			prev_dead_count;
 		int			nfrozen;
 		Size		freespace;
@@ -1145,6 +1147,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 					vacrelstats->nonempty_pages = blkno + 1;
 				continue;
 			}
+			else
+				will_dirty = true;
 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
 			LockBufferForCleanup(buf);
 			/* drop through to normal processing */
@@ -1237,10 +1241,22 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		 *
 		 * We count tuples removed by the pruning step as removed by VACUUM.
 		 */
-		tups_vacuumed += heap_page_prune(onerel, buf, vistest, false,
+		tups_vacuumed_page = heap_page_prune(onerel, buf, vistest, false,
 										 InvalidTransactionId, 0,
 										 &vacrelstats->latestRemovedXid,
 										 &vacrelstats->offnum);
+		tups_vacuumed += tups_vacuumed_page;
+
+		/*
+		 * Check whether we have dirtied, or will dirty, the block, based
+		 * on the user-requested cutoff values. This will be used later
+		 * to decide whether to maximize freezing.
+		 */
+		if (tups_vacuumed_page > 0)
+			will_dirty = true; /* actually already dirty */
+		else if (!will_dirty &&
+				 lazy_check_needs_freeze(buf, &hastup, vacrelstats))
+			will_dirty = true;
 
 		/*
 		 * Now scan the page to collect vacuumable items and check for tuples
@@ -1445,6 +1461,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 													   &vacrelstats->latestRemovedXid);
 				tups_vacuumed += 1;
 				has_dead_tuples = true;
+				if (!will_dirty && nindexes == 0)
+					will_dirty = true;
 			}
 			else
 			{
@@ -1456,10 +1474,15 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				/*
 				 * Each non-removable tuple must be checked to see if it needs
 				 * freezing.  Note we already have exclusive buffer lock.
+				 *
+				 * Once we decide to dirty the data block we may as well freeze
+				 * any tuples that are visible to all, since the additional
+				 * cost of freezing multiple tuples is low.
 				 */
 				if (heap_prepare_freeze_tuple(tuple.t_data,
 											  relfrozenxid, relminmxid,
-											  FreezeLimit, MultiXactCutoff,
+											  (will_dirty ? OldestXmin : FreezeLimit),
+											  MultiXactCutoff,
 											  &frozen[nfrozen],
 											  &tuple_totally_frozen))
 					frozen[nfrozen++].offset = offnum;
