From 660d39f2a31882427522fe48387922dcd4091101 Mon Sep 17 00:00:00 2001
From: Mikhail Nikalayeu <mihailnikalayeu@gmail.com>
Date: Mon, 16 Jun 2025 22:20:38 +0200
Subject: [PATCH v10 2/2] Fix btree index scan concurrency issues with dirty
 snapshots

This patch addresses an issue where non-MVCC index scans using SnapshotDirty or SnapshotSelf could miss tuples due to concurrent modifications. The fix retains read locks on pages for these special snapshot types until the scan is done with the page's tuples, preventing concurrent modifications from causing inconsistent results.

Updated README to document this special case in the btree locking mechanism.
---
 src/backend/access/nbtree/README       | 13 ++++++++++++-
 src/backend/access/nbtree/nbtree.c     | 19 ++++++++++++++++++-
 src/backend/access/nbtree/nbtsearch.c  | 16 ++++++++++++----
 src/backend/access/nbtree/nbtutils.c   |  4 +++-
 src/backend/executor/execReplication.c |  8 ++++++--
 src/include/access/nbtree.h            |  1 +
 6 files changed, 52 insertions(+), 9 deletions(-)

diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README
index 53d4a61dc3f..a9280415633 100644
--- a/src/backend/access/nbtree/README
+++ b/src/backend/access/nbtree/README
@@ -85,7 +85,8 @@ move right until we find a page whose right-link matches the page we
 came from.  (Actually, it's even harder than that; see page deletion
 discussion below.)
 
-Page read locks are held only for as long as a scan is examining a page.
+Page read locks are held only for as long as a scan is examining a page
+(with an exception for SnapshotDirty and SnapshotSelf scans - see below).
 To minimize lock/unlock traffic, an index scan always searches a leaf page
 to identify all the matching items at once, copying their heap tuple IDs
 into backend-local storage.  The heap tuple IDs are then processed while
@@ -103,6 +104,16 @@ We also remember the left-link, and follow it when the scan moves backwards
 (though this requires extra handling to account for concurrent splits of
 the left sibling; see detailed move-left algorithm below).
 
+Despite the mechanics described above, inconsistent results may still occur
+during non-MVCC scans (SnapshotDirty and SnapshotSelf).  This can happen when a
+concurrent transaction deletes a tuple and inserts a new tuple with a new TID
+in the same page.  If the scan has already visited the page and cached its
+content in backend-local storage, it might skip the old tuple due to the
+deletion and miss the new tuple because it does not re-read the page.  To
+address this, for SnapshotDirty and SnapshotSelf scans we retain the read
+lock on the page until we're completely done processing all the tuples from
+that page, preventing concurrent modifications from causing inconsistent results.
+
 In most cases we release our lock and pin on a page before attempting
 to acquire pin and lock on the page we are moving to.  In a few places
 it is necessary to lock the next page before releasing the current one.
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index fdff960c130..bda2b821a51 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -393,10 +393,22 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
 		/* Before leaving current page, deal with any killed items */
 		if (so->numKilled > 0)
 			_bt_killitems(scan);
+		else if (!so->dropLock) /* _bt_killitems always releases lock */
+			_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 		BTScanPosUnpinIfPinned(so->currPos);
 		BTScanPosInvalidate(so->currPos);
 	}
 
+	/*
+	 * For SnapshotDirty and SnapshotSelf scans, we don't unlock the buffer
+	 * here; the lock is kept until we're completely done with this page.
+	 * This prevents concurrent modifications from causing inconsistent
+	 * results during non-MVCC scans.
+	 *
+	 * See nbtree/README for information about SnapshotDirty and SnapshotSelf.
+	 */
+	so->dropLock = scan->xs_snapshot->snapshot_type != SNAPSHOT_DIRTY
+					&& scan->xs_snapshot->snapshot_type != SNAPSHOT_SELF;
 	/*
 	 * We prefer to eagerly drop leaf page pins before btgettuple returns.
 	 * This avoids making VACUUM wait to acquire a cleanup lock on the page.
@@ -420,7 +432,8 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
 	 *
 	 * Note: so->dropPin should never change across rescans.
 	 */
-	so->dropPin = (!scan->xs_want_itup &&
+	so->dropPin = (so->dropLock &&
+				   !scan->xs_want_itup &&
 				   IsMVCCSnapshot(scan->xs_snapshot) &&
 				   RelationNeedsWAL(scan->indexRelation) &&
 				   scan->heapRelation != NULL);
@@ -477,6 +490,8 @@ btendscan(IndexScanDesc scan)
 		/* Before leaving current page, deal with any killed items */
 		if (so->numKilled > 0)
 			_bt_killitems(scan);
+		else if (!so->dropLock) /* _bt_killitems always releases lock */
+			_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 		BTScanPosUnpinIfPinned(so->currPos);
 	}
 
@@ -557,6 +572,8 @@ btrestrpos(IndexScanDesc scan)
 			/* Before leaving current page, deal with any killed items */
 			if (so->numKilled > 0)
 				_bt_killitems(scan);
+			else if (!so->dropLock) /* _bt_killitems always releases lock */
+				_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 			BTScanPosUnpinIfPinned(so->currPos);
 		}
 
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index d69798795b4..f92dba17fa4 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -57,12 +57,14 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
 /*
  *	_bt_drop_lock_and_maybe_pin()
  *
- * Unlock so->currPos.buf.  If scan is so->dropPin, drop the pin, too.
+ * Unlock so->currPos.buf if so->dropLock. If scan is so->dropPin, drop the pin, too.
  * Dropping the pin prevents VACUUM from blocking on acquiring a cleanup lock.
  */
 static inline void
 _bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so)
 {
+	if (!so->dropLock)
+		return;
 	if (!so->dropPin)
 	{
 		/* Just drop the lock (not the pin) */
@@ -1579,7 +1581,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
  *	_bt_next() -- Get the next item in a scan.
  *
  *		On entry, so->currPos describes the current page, which may be pinned
- *		but is not locked, and so->currPos.itemIndex identifies which item was
+ *		but is not locked (except for SnapshotDirty and SnapshotSelf scans, where
+ *		the page remains locked), and so->currPos.itemIndex identifies which item was
  *		previously returned.
  *
  *		On success exit, so->currPos is updated as needed, and _bt_returnitem
@@ -2158,7 +2161,9 @@ _bt_returnitem(IndexScanDesc scan, BTScanOpaque so)
  * Wrapper on _bt_readnextpage that performs final steps for the current page.
  *
  * On entry, so->currPos must be valid.  Its buffer will be pinned, though
- * never locked. (Actually, when so->dropPin there won't even be a pin held,
+ * never locked, except for SnapshotDirty and SnapshotSelf scans where the buffer
+ * remains locked until we're done with all tuples from the page.
+ * (Actually, when so->dropPin there won't even be a pin held,
  * though so->currPos.currPage must still be set to a valid block number.)
  */
 static bool
@@ -2173,6 +2178,8 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir)
 	/* Before leaving current page, deal with any killed items */
 	if (so->numKilled > 0)
 		_bt_killitems(scan);
+	else if (!so->dropLock) /* _bt_killitems always releases lock */
+		_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 
 	/*
 	 * Before we modify currPos, make a copy of the page data if there was a
@@ -2312,7 +2319,8 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
 	}
 
 	/* There's no actually-matching data on the page in so->currPos.buf */
-	_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
+	if (so->dropLock)
+		_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
 
 	/* Call _bt_readnextpage using its _bt_steppage wrapper function */
 	if (!_bt_steppage(scan, dir))
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index edfea2acaff..56d5bf44785 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -3283,8 +3283,10 @@ _bt_killitems(IndexScanDesc scan)
 		 * concurrent VACUUMs from recycling any of the TIDs on the page.
 		 */
 		Assert(BTScanPosIsPinned(so->currPos));
+		/* Re-acquire the read lock only if it was dropped earlier. */
 		buf = so->currPos.buf;
-		_bt_lockbuf(rel, buf, BT_READ);
+		if (so->dropLock)
+			_bt_lockbuf(rel, buf, BT_READ);
 	}
 	else
 	{
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index da0cbf41d6f..c2f5aa2ba5c 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -205,12 +205,11 @@ RelationFindReplTupleByIndex(Relation rel, Oid idxoid,
 
 	/* Start an index scan. */
 	scan = index_beginscan(rel, idxrel, &snap, NULL, skey_attoff, 0);
+	index_rescan(scan, skey, skey_attoff, NULL, 0);
 
 retry:
 	found = false;
 
-	index_rescan(scan, skey, skey_attoff, NULL, 0);
-
 	/* Try to find the tuple */
 	while (index_getnext_slot(scan, ForwardScanDirection, outslot))
 	{
@@ -238,6 +237,8 @@ retry:
 		 */
 		if (TransactionIdIsValid(xwait))
 		{
+			/* Rescan before waiting to ensure we release all index page locks. */
+			index_rescan(scan, skey, skey_attoff, NULL, 0);
 			XactLockTableWait(xwait, NULL, NULL, XLTW_None);
 			goto retry;
 		}
@@ -266,7 +267,10 @@ retry:
 		PopActiveSnapshot();
 
 		if (should_refetch_tuple(res, &tmfd))
+		{
+			index_rescan(scan, skey, skey_attoff, NULL, 0);
 			goto retry;
+		}
 	}
 
 	index_endscan(scan);
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index 9ab467cb8fd..9c10931c8e2 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -1069,6 +1069,7 @@ typedef struct BTScanOpaqueData
 	/* info about killed items if any (killedItems is NULL if never used) */
 	int		   *killedItems;	/* currPos.items indexes of killed items */
 	int			numKilled;		/* number of currently stored items */
+	bool		dropLock;		/* drop lock before btgettuple returns? */
 	bool		dropPin;		/* drop leaf pin before btgettuple returns? */
 
 	/*
-- 
2.43.0

