diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index a880c81..f32460a 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -269,14 +269,15 @@ void
 tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids,
 			   bool recheck)
 {
-	int			i;
+	int				i;
+	PagetableEntry *page = NULL;
+	BlockNumber		lossy_page = InvalidBlockNumber;
 
 	Assert(!tbm->iterating);
 	for (i = 0; i < ntids; i++)
 	{
 		BlockNumber blk = ItemPointerGetBlockNumber(tids + i);
 		OffsetNumber off = ItemPointerGetOffsetNumber(tids + i);
-		PagetableEntry *page;
 		int			wordnum,
 					bitnum;
 
@@ -284,10 +285,30 @@ tbm_add_tuples(TIDBitmap *tbm, const ItemPointer tids, int ntids,
 		if (off < 1 || off > MAX_TUPLES_PER_PAGE)
 			elog(ERROR, "tuple offset out of range: %u", off);
 
-		if (tbm_page_is_lossy(tbm, blk))
-			continue;			/* whole page is already marked */
+		if (lossy_page == blk)
+			continue;	/* same lossy page as last time, skip it */
+
+		if (page == NULL || page->blockno != blk)
+		{
+			if (tbm_page_is_lossy(tbm, blk))
+			{
+				/*
+				 * Whole page is already marked lossy.  Remember this block
+				 * number so that we can skip the hash table lookup on the
+				 * next loop iteration if it happens to hit the same block
+				 * again.
+				 */
+				lossy_page = blk;
+				continue;
+			}
 
-		page = tbm_get_pageentry(tbm, blk);
+			/*
+			 * Cache this page entry, since the next tuple is quite likely
+			 * to be on the same page.  That saves us from having to look
+			 * the page up in the hash table again.
+			 */
+			page = tbm_get_pageentry(tbm, blk);
+		}
 
 		if (page->ischunk)
 		{

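For readers skimming the patch: the two new variables exploit the fact that consecutive TIDs in the input often fall on the same heap page (common when the TIDs arrive in page order), so remembering the last PagetableEntry, and the last block found lossy, collapses one hash table lookup per tuple into roughly one per distinct page. Below is a minimal standalone sketch of the same pattern, not PostgreSQL code; the names (entry, lookup_entry, add_members) are hypothetical and exist only to illustrate the idea.

/*
 * Standalone sketch of the caching idea: probe the hash table only when
 * the key changes, and reuse the remembered entry for every consecutive
 * repeat of that key.
 */
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 256

typedef struct entry
{
	unsigned int key;
	unsigned int nmembers;
	struct entry *next;
} entry;

static entry *buckets[NBUCKETS];

/* plain chained-hash lookup, creating the entry on first use */
static entry *
lookup_entry(unsigned int key)
{
	entry	   *e;

	for (e = buckets[key % NBUCKETS]; e != NULL; e = e->next)
	{
		if (e->key == key)
			return e;
	}

	e = calloc(1, sizeof(entry));
	if (e == NULL)
	{
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	e->key = key;
	e->next = buckets[key % NBUCKETS];
	buckets[key % NBUCKETS] = e;
	return e;
}

/*
 * Add a batch of members.  As in the patched tbm_add_tuples, the hash
 * table is only probed when the key differs from the previous one, so
 * sorted input costs one probe per distinct key rather than one per
 * element.
 */
static void
add_members(const unsigned int *keys, int n)
{
	entry	   *cached = NULL;
	int			i;

	for (i = 0; i < n; i++)
	{
		if (cached == NULL || cached->key != keys[i])
			cached = lookup_entry(keys[i]);		/* probe only on key change */
		cached->nmembers++;		/* cheap per-member work */
	}
}

int
main(void)
{
	/* sorted input: six elements but only two distinct keys -> two probes */
	unsigned int keys[] = {7, 7, 7, 7, 42, 42};

	add_members(keys, 6);
	printf("key 7 has %u members\n", lookup_entry(7)->nmembers);
	printf("key 42 has %u members\n", lookup_entry(42)->nmembers);
	return 0;
}

The patch's lossy_page variable is the same trick applied to the skip path: once a block has been found lossy, repeats of that block can be skipped without touching the hash table at all.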