diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index dc3499349b..63ad1d8d89 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2119,6 +2119,20 @@ ReleaseBulkInsertStatePin(BulkInsertState bistate)
 	bistate->current_buf = InvalidBuffer;
 }
 
+/*
+ * CheckAndSetAllVisibleBulkInsertState - if the page currently held in
+ * bistate contains only frozen tuples, mark it all-visible and frozen.
+ */
+void
+CheckAndSetAllVisibleBulkInsertState(Relation relation, BulkInsertState bistate)
+{
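+	/*
+	 * bistate keeps only a pin on the current buffer;
+	 * CheckAndSetPageAllVisible acquires the buffer lock itself.
+	 */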
+	if (bistate->current_buf != InvalidBuffer)
+		CheckAndSetPageAllVisible(relation, bistate->current_buf, NULL);
+}
 
 /*
  *	heap_insert		- insert tuple into a heap
@@ -9079,3 +9093,164 @@ heap_mask(char *pagedata, BlockNumber blkno)
 		}
 	}
 }
+
+/*
+ * Check if all tuples in this page are frozen and visible, without doing any
+ * extensive checks.
+ *
+ * The only use of this function is when we are doing heap insertions in
+ * HEAP_INSERT_FROZEN mode (e.g. COPY FREEZE). Since the relation is not
+ * visible to any other transaction when running in this mode, we usually
+ * expect the pages to contain only tuples with a frozen xmin and an invalid
+ * xmax. But our own transaction may have inserted regular tuples, which are
+ * not marked frozen, into the table, so out of an abundance of caution we
+ * still do this check.
+ *
+ * While we could do more elaborate tests like heap_page_is_all_visible does,
+ * we deliberately try to keep this simple.
+ */
+static bool
+CheckPageIsAllFrozen(Relation relation, Buffer buf)
+{
+	Page		page = BufferGetPage(buf);
+	BlockNumber blockno = BufferGetBlockNumber(buf);
+	OffsetNumber offnum,
+				maxoff;
+
+	/*
+	 * This is a stripped down version of the line pointer scan in
+	 * lazy_scan_heap(). So if you change anything here, also check that code.
+	 */
+	maxoff = PageGetMaxOffsetNumber(page);
+	for (offnum = FirstOffsetNumber;
+		 offnum <= maxoff;
+		 offnum = OffsetNumberNext(offnum))
+	{
+		ItemId		itemid;
+		HeapTupleData tuple;
+
+		itemid = PageGetItemId(page, offnum);
+
+		/* Unused or redirect line pointers are of no interest */
+		if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
+			continue;
+
+		ItemPointerSet(&(tuple.t_self), blockno, offnum);
+
+		/*
+		 * Dead line pointers can have index pointers pointing to them, so
+		 * they can't be treated as visible.
+		 */
+		if (ItemIdIsDead(itemid))
+			return false;
+
+		Assert(ItemIdIsNormal(itemid));
+
+		tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
+		tuple.t_len = ItemIdGetLength(itemid);
+		tuple.t_tableOid = RelationGetRelid(relation);
+
+		/*
+		 * If xmin is not frozen, then someone other than the process
+		 * inserting tuples in HEAP_INSERT_FROZEN mode has inserted tuples
+		 * into this page. Don't mark such a page all-visible and frozen.
+		 */
+		if (!HeapTupleHeaderXminFrozen(tuple.t_data))
+			return false;
+
+		/*
+		 * Similarly, if xmax is set, be paranoid and don't mark the page as
+		 * all-visible and frozen.
+		 */
+		if (HeapTupleHeaderGetRawXmax(tuple.t_data) != InvalidTransactionId)
+			return false;
+	}							/* scan along page */
+
+	return true;
+}
+
+/*
+ * If we are inserting frozen tuples, check whether the given page contains
+ * only frozen tuples and, if so, mark it all-visible and frozen.
+ *
+ * If the caller passes a valid vmbuffer, a valid (possibly different)
+ * vmbuffer is returned; otherwise we pin and release the VM page ourselves.
+ */
+void
+CheckAndSetPageAllVisible(Relation relation, Buffer buffer, Buffer *vmbuffer)
+{
+	BlockNumber targetBlock;
+	Page		page;
+	Buffer		myvmbuffer = InvalidBuffer;
+
+	/* Nothing to do if we're passed an invalid buffer */
+	if (!BufferIsValid(buffer))
+		return;
+
+	targetBlock = BufferGetBlockNumber(buffer);
+	page = BufferGetPage(buffer);
+
+	/*
+	 * Use the passed-in vmbuffer, if available; visibilitymap_pin will keep
+	 * it only if it covers the target block. Either way, pin the required
+	 * visibility map page before locking the heap page.
+	 */
+	if (vmbuffer && BufferIsValid(*vmbuffer))
+		myvmbuffer = *vmbuffer;
+	visibilitymap_pin(relation, targetBlock, &myvmbuffer);
+
+	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+	/*
+	 * While we are holding the lock on the page, check whether all tuples
+	 * in the page were marked frozen at insertion. Only then can we safely
+	 * mark the page all-visible and set the visibility map bits.
+	 */
+	if (CheckPageIsAllFrozen(relation, buffer))
+	{
+		PageSetAllVisible(page);
+		MarkBufferDirty(buffer);
+	}
+
+	/*
+	 * All the changes to the heap page have been done. If the all-visible
+	 * flag is now set, also set the VM all-visible bit (and, if possible, the
+	 * all-frozen bit) unless this has already been done previously.
+	 *
+	 * Note: this portion of the code resembles what we do in
+	 * vacuumlazy.c.
+	 */
+	if (PageIsAllVisible(page))
+	{
+		uint8		vm_status = visibilitymap_get_status(relation,
+				targetBlock, &myvmbuffer);
+		uint8		flags = 0;
+
+		/* Set whichever VM bits are not already set */
+		if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
+			flags |= VISIBILITYMAP_ALL_VISIBLE;
+		if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0)
+			flags |= VISIBILITYMAP_ALL_FROZEN;
+
+		Assert(BufferIsValid(myvmbuffer));
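+
+		/*
+		 * Passing InvalidXLogRecPtr makes visibilitymap_set WAL-log the
+		 * change itself when the relation needs WAL, as lazy vacuum does.
+		 */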
+		if (flags != 0)
+			visibilitymap_set(relation, targetBlock, buffer, InvalidXLogRecPtr,
+					myvmbuffer, InvalidTransactionId, flags);
+	}
+
+	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+	/*
+	 * If the caller passed a valid vmbuffer, return the (potentially
+	 * different) vmbuffer. Otherwise just release the vmbuffer we pinned.
+	 */
+	if (vmbuffer && BufferIsValid(*vmbuffer))
+		*vmbuffer = myvmbuffer;
+	else if (BufferIsValid(myvmbuffer))
+		ReleaseBuffer(myvmbuffer);
+}
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index a9c8ec43a7..3d978490b6 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -415,7 +415,15 @@ loop:
 		{
 			/* easy case */
 			buffer = ReadBufferBI(relation, targetBlock, RBM_NORMAL, bistate);
-			if (PageIsAllVisible(BufferGetPage(buffer)))
+
+			/*
+			 * Pin the visibility map buffer if the page is known to be
+			 * all-visible, or if we are running in HEAP_INSERT_FROZEN mode.
+			 * In the latter case we may decide to mark the page all-visible
+			 * before switching to a new page, so obtain the pin in advance.
+			 */
+			if (PageIsAllVisible(BufferGetPage(buffer)) ||
+				options & HEAP_INSERT_FROZEN)
 				visibilitymap_pin(relation, targetBlock, vmbuffer);
 			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 		}
@@ -516,13 +524,24 @@ loop:
 		 * code above.
 		 */
 		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-		if (otherBuffer == InvalidBuffer)
-			ReleaseBuffer(buffer);
-		else if (otherBlock != targetBlock)
-		{
+		if (otherBuffer != InvalidBuffer && otherBlock != targetBlock)
 			LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
+
+		/*
+		 * If we're inserting frozen tuples, check whether the page we are
+		 * about to leave is completely frozen, and set its visibility map
+		 * bits if so. CheckAndSetPageAllVisible takes the buffer lock
+		 * itself, so call it only after releasing our lock above.
+		 */
+		if (options & HEAP_INSERT_FROZEN)
+			CheckAndSetPageAllVisible(relation, buffer, vmbuffer);
+
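+		/*
+		 * Release our pin, unless the target page is also the "other" page,
+		 * in which case the caller still needs its pin.
+		 */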
+		if ((otherBuffer == InvalidBuffer) || (otherBlock != targetBlock))
 			ReleaseBuffer(buffer);
-		}
 
 		/* Without FSM, always fall out of the loop and extend */
 		if (!use_fsm)
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index dbb06397e6..468b62f3e3 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2834,6 +2834,15 @@ CopyFrom(CopyState cstate)
 					!has_instead_insert_row_trig &&
 					resultRelInfo->ri_FdwRoutine == NULL;
 
+				/*
+				 * Note: As of PG12, COPY FREEZE is not supported on
+				 * partitioned tables. Nevertheless, keep this check in place
+				 * so that we do the right thing if it ever gets supported.
+				 */
+				if (hi_options & HEAP_INSERT_FROZEN)
+					CheckAndSetAllVisibleBulkInsertState(resultRelInfo->ri_RelationDesc,
+							bistate);
+
 				/*
 				 * We'd better make the bulk insert mechanism gets a new
 				 * buffer when the partition being inserted into changes.
@@ -3047,6 +3056,15 @@ CopyFrom(CopyState cstate)
 								firstBufferedLineNo);
 	}
 
+	/*
+	 * If we are inserting frozen tuples, check if the last page used can also
+	 * be marked as all-visible and all-frozen. This ensures that a table can
+	 * be fully frozen when the data is loaded.
+	 */
+	if (hi_options & HEAP_INSERT_FROZEN)
+		CheckAndSetAllVisibleBulkInsertState(resultRelInfo->ri_RelationDesc,
+				bistate);
+
 	/* Done, clean up */
 	error_context_stack = errcallback.previous;
 
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index ab0879138f..a31fef5780 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -132,6 +132,10 @@ extern void setLastTid(const ItemPointer tid);
 extern BulkInsertState GetBulkInsertState(void);
 extern void FreeBulkInsertState(BulkInsertState);
 extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
+extern void CheckAndSetAllVisibleBulkInsertState(Relation relation,
+					BulkInsertState bistate);
+extern void CheckAndSetPageAllVisible(Relation relation,
+					Buffer buffer, Buffer *vmbuffer);
 
 extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 			int options, BulkInsertState bistate);

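For illustration only, not part of the patch: assuming the pg_visibility
extension is installed, a load like the following should leave every page of
the table all-visible and all-frozen once the patch is applied (previously,
COPY FREEZE froze the tuples but left the VM bits to be set by a later
VACUUM). The table name and file path are made up for the example.

    -- COPY FREEZE requires the table to be created (or truncated)
    -- in the same transaction that loads it.
    BEGIN;
    CREATE TABLE freeze_test (a int, b text);
    COPY freeze_test FROM '/tmp/freeze_test.csv' (FORMAT csv, FREEZE);
    COMMIT;

    -- Expect a count of 0: every page has both VM bits set.
    CREATE EXTENSION IF NOT EXISTS pg_visibility;
    SELECT count(*)
    FROM pg_visibility_map('freeze_test'::regclass)
    WHERE NOT all_visible OR NOT all_frozen;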