diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index a07686d..11bae78 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -30,14 +30,85 @@
 #include "executor/nodeIndexonlyscan.h"
 #include "executor/nodeIndexscan.h"
 #include "storage/bufmgr.h"
+#include "storage/smgr.h"
 #include "utils/memutils.h"
 #include "utils/rel.h"
 
+/*
+ * Size of the bitmap on each visibility map page, in bytes. There are
+ * no extra headers, so the whole page minus the standard page header
+ * is used for the bitmap.
+ */
+#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))
+
+/* Number of bits allocated for each heap block. */
+#define BITS_PER_HEAPBLOCK 1
+
+/* Number of heap blocks we can represent in one byte. */
+#define HEAPBLOCKS_PER_BYTE 8
+
+/* Number of heap blocks we can represent in one visibility map page. */
+#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)
+
+/* Mapping from heap block number to the right bit in the visibility map */
+#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
+#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
+#define HEAPBLK_TO_MAPBIT(x) ((x) % HEAPBLOCKS_PER_BYTE)
 
 static TupleTableSlot *IndexOnlyNext(IndexOnlyScanState *node);
 static void StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup,
 				TupleDesc itupdesc);
 
+/*
+ * Read a visibility map page.
+ *
+ * If the page doesn't exist, InvalidBuffer is returned; but if 'extend'
+ * is true, the visibility map file is extended to cover it instead.
+ */
+static Buffer
+vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
+{
+	Buffer		buf;
+
+	RelationOpenSmgr(rel);
+
+	/*
+	 * If we haven't cached the size of the visibility map fork yet, check it
+	 * first.  Also recheck if the requested block seems to be past end, since
+	 * our cached value might be stale.  (We send smgr inval messages on
+	 * truncation, but not on extension.)
+	 */
+	if (rel->rd_smgr->smgr_vm_nblocks == InvalidBlockNumber ||
+		blkno >= rel->rd_smgr->smgr_vm_nblocks)
+	{
+		if (smgrexists(rel->rd_smgr, VISIBILITYMAP_FORKNUM))
+			rel->rd_smgr->smgr_vm_nblocks = smgrnblocks(rel->rd_smgr,
+													  VISIBILITYMAP_FORKNUM);
+		else
+			rel->rd_smgr->smgr_vm_nblocks = 0;
+	}
+
+	/* Handle requests beyond EOF */
+	if (blkno >= rel->rd_smgr->smgr_vm_nblocks)
+	{
+		if (extend)
+			vm_extend(rel, blkno + 1);
+		else
+			return InvalidBuffer;
+	}
+
+	/*
+	 * Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's
+	 * always safe to clear bits, so it's better to clear corrupt pages than
+	 * error out.
+	 */
+	buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
+							 RBM_ZERO_ON_ERROR, NULL);
+	if (PageIsNew(BufferGetPage(buf)))
+		PageInit(BufferGetPage(buf), BLCKSZ, 0);
+	return buf;
+}
+
 
 /* ----------------------------------------------------------------
  *		IndexOnlyNext
@@ -78,14 +149,52 @@ IndexOnlyNext(IndexOnlyScanState *node)
 	 */
 	while ((tid = index_getnext_tid(scandesc, direction)) != NULL)
 	{
+		Relation	rel = scandesc->heapRelation;
+		BlockNumber	heapBlk = ItemPointerGetBlockNumber(tid);
+		Buffer	   *buf = &node->ioss_VMBuffer;
+		bool		result;
+
+		{
+			BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
+			uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
+			uint8		mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
+			char	   *map;
+
+			/* Reuse the old pinned buffer if possible */
+			if (BufferIsValid(*buf))
+			{
+				if (BufferGetBlockNumber(*buf) != mapBlock)
+				{
+					ReleaseBuffer(*buf);
+					*buf = InvalidBuffer;
+				}
+			}
+
+			if (!BufferIsValid(*buf))
+			{
+				*buf = vm_readbuf(rel, mapBlock, false);
+				if (!BufferIsValid(*buf))
+				{
+					result = false;
+					goto l1;
+				}
+			}
+
+			map = PageGetContents(BufferGetPage(*buf));
+
+			/*
+			 * No lock is needed: we only examine a single bit of the map.
+			 */
+			result = (map[mapByte] & (1 << mapBit)) ? true : false;
+		}
+
 		/*
 		 * We can skip the heap fetch if the TID references a heap page on
 		 * which all tuples are known visible to everybody.  In any case,
 		 * we'll use the index tuple not the heap tuple as the data source.
 		 */
-		if (!visibilitymap_test(scandesc->heapRelation,
-								ItemPointerGetBlockNumber(tid),
-								&node->ioss_VMBuffer))
+l1:
+		if (!result)
 		{
 			/*
 			 * Rats, we have to visit the heap to check visibility.
