On Wed, Nov 16, 2022 at 10:49 AM Peter Eisentraut
<peter.eisentr...@enterprisedb.com> wrote:
>
> On 04.11.22 16:51, Melanie Plageman wrote:
> > Thanks for the review!
> > Attached is v2 with feedback addressed.
>
> Your 0001 had already been pushed.
>
> I have pushed your 0002.
>
> I have also pushed the renaming of page -> block, dp -> page separately.
>   This should reduce the size of your 0003 a bit.
>
> Please produce an updated version of the 0003 patch for further review.

Thanks for looking at this!
I have attached a patchset containing only the code changes from the
previous 0003 patch. I have broken the refactoring down into many
smaller pieces for ease of review.
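
To give reviewers a rough picture of where the series ends up, below is
a toy model of the final heapgettup() control flow. This is my own
simplification, not code from the patches: heapgettup_initial_page()
picks the starting block, an outer loop walks blocks until it sees
InvalidBlockNumber, an inner loop steps the line offset by the scan
direction, and heapgettup_advance_page() picks the next block. Buffer
locking, visibility checks, rs_numblocks, parallel scans, and syncscan
reporting are all omitted, and the names here (advance_block, etc.) are
made up for the sketch. It compiles standalone and just prints the
visit order.

#include <stdio.h>

#define INVALID_BLOCK (-1)

/*
 * Step to the next block in the given direction, wrapping around and
 * stopping once we arrive back at the block the scan started on.
 */
static int
advance_block(int block, int dir, int nblocks, int startblock)
{
	block += dir;
	if (block < 0)
		block = nblocks - 1;
	else if (block >= nblocks)
		block = 0;
	return (block == startblock) ? INVALID_BLOCK : block;
}

int
main(void)
{
	int		nblocks = 3;		/* pretend the relation has three blocks */
	int		nlines = 2;			/* ...and two line pointers per block */
	int		startblock = 1;
	int		dir = 1;			/* +1 forward, -1 backward, like ScanDirection */
	int		block = startblock; /* what heapgettup_initial_page() would pick */

	while (block != INVALID_BLOCK)
	{
		/* like heapgettup_start_page(): position at the first or last line */
		int		linesleft = nlines;
		int		lineoff = (dir > 0) ? 1 : nlines;

		for (; linesleft > 0; linesleft--, lineoff += dir)
			printf("visit block %d, line %d\n", block, lineoff);

		/* like heapgettup_advance_page(): next block or end of scan */
		block = advance_block(block, dir, nblocks, startblock);
	}
	return 0;
}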

- Melanie
From 2e63656e75b00b24d286533ae8c8ef291876d4a8 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Wed, 30 Nov 2022 14:18:08 -0500
Subject: [PATCH v3 5/7] Add heapgettup() helpers

Add heapgettup_start_page() and heapgettup_continue_page(), helper
functions for heapgettup().
---
 src/backend/access/heap/heapam.c | 93 +++++++++++++++++++++-----------
 1 file changed, 61 insertions(+), 32 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index a4365beee1..f9b2d5cadb 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -575,6 +575,65 @@ heapgettup_initial_page(HeapScanDesc scan, ScanDirection dir)
 	return scan->rs_nblocks - 1;
 }
 
+static inline Page
+heapgettup_start_page(HeapScanDesc scan, BlockNumber block, ScanDirection dir,
+		int *linesleft, OffsetNumber *lineoff)
+{
+	Page page;
+
+	Assert(scan->rs_inited);
+	Assert(BufferIsValid(scan->rs_cbuf));
+
+	/* Caller is responsible for ensuring buffer is locked if needed */
+	page = BufferGetPage(scan->rs_cbuf);
+
+	TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
+
+	*linesleft = PageGetMaxOffsetNumber((Page) page) - FirstOffsetNumber + 1;
+
+	if (ScanDirectionIsForward(dir))
+		*lineoff = FirstOffsetNumber;
+	else
+		*lineoff = (OffsetNumber) (*linesleft);
+
+	/* lineoff now references the physically previous or next tid */
+	return page;
+}
+
+static inline Page
+heapgettup_continue_page(HeapScanDesc scan, BlockNumber block, ScanDirection
+		dir, int *linesleft, OffsetNumber *lineoff)
+{
+	Page page;
+
+	Assert(scan->rs_inited);
+	Assert(BufferIsValid(scan->rs_cbuf));
+
+	/* Caller is responsible for ensuring buffer is locked if needed */
+	page = BufferGetPage(scan->rs_cbuf);
+
+	TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
+
+	if (ScanDirectionIsForward(dir))
+	{
+		*lineoff = OffsetNumberNext(scan->rs_cindex);
+		*linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
+	}
+	else
+	{
+		/*
+		 * The previous returned tuple may have been vacuumed since the
+		 * previous scan when we use a non-MVCC snapshot, so we must
+		 * re-establish the lineoff <= PageGetMaxOffsetNumber(page)
+		 * invariant
+		 */
+		*lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_cindex));
+		*linesleft = *lineoff;
+	}
+	/* block and lineoff now reference the physically next or previous tid */
+	return page;
+}
+
 /* ----------------
  *		heapgettup - fetch next heap tuple
  *
@@ -632,45 +691,15 @@ heapgettup(HeapScanDesc scan,
 		}
 
 		heapgetpage((TableScanDesc) scan, block);
-
 		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-
-		linesleft = PageGetMaxOffsetNumber((Page) page) - FirstOffsetNumber + 1;
-
-		if (ScanDirectionIsForward(dir))
-			lineoff = FirstOffsetNumber;
-		else
-			lineoff = (OffsetNumber) linesleft;
+		page = heapgettup_start_page(scan, block, dir, &linesleft, &lineoff);
 	}
 	else
 	{
 		block = scan->rs_cblock; /* current page */
 
 		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
-
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-
-		if (ScanDirectionIsForward(dir))
-		{
-			lineoff = OffsetNumberNext(scan->rs_cindex);
-			/* block and lineoff now reference the physically next tid */
-			linesleft = PageGetMaxOffsetNumber(page) - lineoff + 1;
-		}
-		else
-		{
-			/*
-			 * The previous returned tuple may have been vacuumed since the
-			 * previous scan when we use a non-MVCC snapshot, so we must
-			 * re-establish the lineoff <= PageGetMaxOffsetNumber(page)
-			 * invariant
-			 */
-			lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_cindex));
-			/* block and lineoff now reference the physically previous tid */
-			linesleft = lineoff;
-		}
+		page = heapgettup_continue_page(scan, block, dir, &linesleft, &lineoff);
 	}
 
 	/*
-- 
2.34.1

From 8d4106f9f045999c5b3432fc19ee8f216b85df2d Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Wed, 30 Nov 2022 10:42:12 -0500
Subject: [PATCH v3 2/7] Push lpp variable closer to usage in heapgetpage()

---
 src/backend/access/heap/heapam.c | 41 ++++++++++++++++----------------
 1 file changed, 20 insertions(+), 21 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f15296a67a..3da9c81c5b 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -382,7 +382,6 @@ heapgetpage(TableScanDesc sscan, BlockNumber block)
 	int			lines;
 	int			ntup;
 	OffsetNumber lineoff;
-	ItemId		lpp;
 	bool		all_visible;
 
 	Assert(block < scan->rs_nblocks);
@@ -451,31 +450,31 @@ heapgetpage(TableScanDesc sscan, BlockNumber block)
 	 */
 	all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
 
-	for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(page, lineoff);
-		 lineoff <= lines;
-		 lineoff++, lpp++)
+	for (lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
 	{
-		if (ItemIdIsNormal(lpp))
-		{
-			HeapTupleData loctup;
-			bool		valid;
+		HeapTupleData loctup;
+		bool		valid;
 
-			loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
-			loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
-			loctup.t_len = ItemIdGetLength(lpp);
-			ItemPointerSet(&(loctup.t_self), block, lineoff);
+		ItemId	lpp = PageGetItemId(page, lineoff);
 
-			if (all_visible)
-				valid = true;
-			else
-				valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
+		if (!ItemIdIsNormal(lpp))
+			continue;
 
-			HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
-												&loctup, buffer, snapshot);
+		loctup.t_tableOid = RelationGetRelid(scan->rs_base.rs_rd);
+		loctup.t_data = (HeapTupleHeader) PageGetItem(page, lpp);
+		loctup.t_len = ItemIdGetLength(lpp);
+		ItemPointerSet(&(loctup.t_self), block, lineoff);
 
-			if (valid)
-				scan->rs_vistuples[ntup++] = lineoff;
-		}
+		if (all_visible)
+			valid = true;
+		else
+			valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
+
+		HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
+											&loctup, buffer, snapshot);
+
+		if (valid)
+			scan->rs_vistuples[ntup++] = lineoff;
 	}
 
 	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-- 
2.34.1

From 2ddb8123f1a8d1d6938c15608a57d09958cfecfd Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Wed, 30 Nov 2022 17:06:43 -0500
Subject: [PATCH v3 3/7] Add heapgettup_initial_page() helper

---
 src/backend/access/heap/heapam.c | 215 ++++++++++++-------------------
 1 file changed, 82 insertions(+), 133 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 3da9c81c5b..142bcc0a1c 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -523,6 +523,58 @@ heapgettup_no_movement(HeapScanDesc scan)
 	return;
 }
 
+static inline BlockNumber
+heapgettup_initial_page(HeapScanDesc scan, ScanDirection dir)
+{
+	Assert(!ScanDirectionIsNoMovement(dir));
+	Assert(!scan->rs_inited);
+
+	/* return null immediately if relation is empty */
+	if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+		return InvalidBlockNumber;
+
+	scan->rs_inited = true;
+
+	/* forward and serial */
+	if (ScanDirectionIsForward(dir) && scan->rs_base.rs_parallel == NULL)
+		return scan->rs_startblock;
+
+	/* forward and parallel */
+	if (ScanDirectionIsForward(dir))
+	{
+		table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
+				scan->rs_parallelworkerdata,
+				(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
+
+		return table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+				scan->rs_parallelworkerdata,
+				(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
+	}
+
+	/* backward parallel scan not supported */
+	Assert(scan->rs_base.rs_parallel == NULL);
+
+	/*
+	 * Disable reporting to syncscan logic in a backwards scan; it's
+	 * not very likely anyone else is doing the same thing at the same
+	 * time, and much more likely that we'll just bollix things for
+	 * forward scanners.
+	 */
+	scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+
+	/*
+	 * Start from last page of the scan.  Ensure we take into account
+	 * rs_numblocks if it's been adjusted by heap_setscanlimits().
+	 */
+	if (scan->rs_numblocks != InvalidBlockNumber)
+		return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
+
+	if (scan->rs_startblock > 0)
+		return scan->rs_startblock - 1;
+
+	return scan->rs_nblocks - 1;
+}
+
 /* ----------------
  *		heapgettup - fetch next heap tuple
  *
@@ -569,48 +621,20 @@ heapgettup(HeapScanDesc scan,
 		return;
 	}
 
-	/*
-	 * calculate next starting lineoff, given scan direction
-	 */
 	if (ScanDirectionIsForward(dir))
 	{
 		if (!scan->rs_inited)
 		{
-			/*
-			 * return null immediately if relation is empty
-			 */
-			if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+			block = heapgettup_initial_page(scan, dir);
+
+			if (block == InvalidBlockNumber)
 			{
 				Assert(!BufferIsValid(scan->rs_cbuf));
 				tuple->t_data = NULL;
 				return;
 			}
-			if (scan->rs_base.rs_parallel != NULL)
-			{
-				ParallelBlockTableScanDesc pbscan =
-				(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
-				ParallelBlockTableScanWorker pbscanwork =
-				scan->rs_parallelworkerdata;
-
-				table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
-														 pbscanwork, pbscan);
-
-				block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
-														  pbscanwork, pbscan);
-
-				/* Other processes might have already finished the scan. */
-				if (block == InvalidBlockNumber)
-				{
-					Assert(!BufferIsValid(scan->rs_cbuf));
-					tuple->t_data = NULL;
-					return;
-				}
-			}
-			else
-				block = scan->rs_startblock; /* first page */
 			heapgetpage((TableScanDesc) scan, block);
-			lineoff = FirstOffsetNumber;	/* first offnum */
-			scan->rs_inited = true;
+			lineoff = FirstOffsetNumber;
 		}
 		else
 		{
@@ -632,60 +656,36 @@ heapgettup(HeapScanDesc scan,
 	else
 	{
 		Assert(backward);
-		/* backward parallel scan not supported */
-		Assert(scan->rs_base.rs_parallel == NULL);
 
 		if (!scan->rs_inited)
 		{
-			/*
-			 * return null immediately if relation is empty
-			 */
-			if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+			block = heapgettup_initial_page(scan, dir);
+
+			if (block == InvalidBlockNumber)
 			{
 				Assert(!BufferIsValid(scan->rs_cbuf));
 				tuple->t_data = NULL;
 				return;
 			}
 
-			/*
-			 * Disable reporting to syncscan logic in a backwards scan; it's
-			 * not very likely anyone else is doing the same thing at the same
-			 * time, and much more likely that we'll just bollix things for
-			 * forward scanners.
-			 */
-			scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
-
-			/*
-			 * Start from last page of the scan.  Ensure we take into account
-			 * rs_numblocks if it's been adjusted by heap_setscanlimits().
-			 */
-			if (scan->rs_numblocks != InvalidBlockNumber)
-				block = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
-			else if (scan->rs_startblock > 0)
-				block = scan->rs_startblock - 1;
-			else
-				block = scan->rs_nblocks - 1;
 			heapgetpage((TableScanDesc) scan, block);
+			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+
+			page = BufferGetPage(scan->rs_cbuf);
+			TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
+			lines = PageGetMaxOffsetNumber(page);
+			lineoff = lines;	/* final offnum */
 		}
 		else
 		{
 			/* continue from previously returned page/tuple */
 			block = scan->rs_cblock; /* current page */
-		}
-
-		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
 
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-		lines = PageGetMaxOffsetNumber(page);
+			page = BufferGetPage(scan->rs_cbuf);
+			TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
+			lines = PageGetMaxOffsetNumber(page);
 
-		if (!scan->rs_inited)
-		{
-			lineoff = lines;	/* final offnum */
-			scan->rs_inited = true;
-		}
-		else
-		{
 			/*
 			 * The previous returned tuple may have been vacuumed since the
 			 * previous scan when we use a non-MVCC snapshot, so we must
@@ -893,41 +893,17 @@ heapgettup_pagemode(HeapScanDesc scan,
 	{
 		if (!scan->rs_inited)
 		{
-			/*
-			 * return null immediately if relation is empty
-			 */
-			if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+			block = heapgettup_initial_page(scan, dir);
+
+			if (block == InvalidBlockNumber)
 			{
 				Assert(!BufferIsValid(scan->rs_cbuf));
 				tuple->t_data = NULL;
 				return;
 			}
-			if (scan->rs_base.rs_parallel != NULL)
-			{
-				ParallelBlockTableScanDesc pbscan =
-				(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
-				ParallelBlockTableScanWorker pbscanwork =
-				scan->rs_parallelworkerdata;
-
-				table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
-														 pbscanwork, pbscan);
-
-				block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
-														  pbscanwork, pbscan);
 
-				/* Other processes might have already finished the scan. */
-				if (block == InvalidBlockNumber)
-				{
-					Assert(!BufferIsValid(scan->rs_cbuf));
-					tuple->t_data = NULL;
-					return;
-				}
-			}
-			else
-				block = scan->rs_startblock; /* first page */
 			heapgetpage((TableScanDesc) scan, block);
 			lineindex = 0;
-			scan->rs_inited = true;
 		}
 		else
 		{
@@ -946,58 +922,31 @@ heapgettup_pagemode(HeapScanDesc scan,
 	else
 	{
 		Assert(backward);
-		/* backward parallel scan not supported */
-		Assert(scan->rs_base.rs_parallel == NULL);
 
 		if (!scan->rs_inited)
 		{
-			/*
-			 * return null immediately if relation is empty
-			 */
-			if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+			block = heapgettup_initial_page(scan, dir);
+
+			if (block == InvalidBlockNumber)
 			{
 				Assert(!BufferIsValid(scan->rs_cbuf));
 				tuple->t_data = NULL;
 				return;
 			}
 
-			/*
-			 * Disable reporting to syncscan logic in a backwards scan; it's
-			 * not very likely anyone else is doing the same thing at the same
-			 * time, and much more likely that we'll just bollix things for
-			 * forward scanners.
-			 */
-			scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
-
-			/*
-			 * Start from last page of the scan.  Ensure we take into account
-			 * rs_numblocks if it's been adjusted by heap_setscanlimits().
-			 */
-			if (scan->rs_numblocks != InvalidBlockNumber)
-				block = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
-			else if (scan->rs_startblock > 0)
-				block = scan->rs_startblock - 1;
-			else
-				block = scan->rs_nblocks - 1;
 			heapgetpage((TableScanDesc) scan, block);
+			page = BufferGetPage(scan->rs_cbuf);
+			TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
+			lines = scan->rs_ntuples;
+			lineindex = lines - 1;
 		}
 		else
 		{
 			/* continue from previously returned page/tuple */
 			block = scan->rs_cblock; /* current page */
-		}
-
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-		lines = scan->rs_ntuples;
-
-		if (!scan->rs_inited)
-		{
-			lineindex = lines - 1;
-			scan->rs_inited = true;
-		}
-		else
-		{
+			page = BufferGetPage(scan->rs_cbuf);
+			TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
+			lines = scan->rs_ntuples;
 			lineindex = scan->rs_cindex - 1;
 		}
 		/* block and lineindex now reference the previous visible tid */
-- 
2.34.1
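
As a sanity check on the backward start block chosen by
heapgettup_initial_page() when heap_setscanlimits() has been used: it is
simply the last block of the limited range,
(rs_startblock + rs_numblocks - 1) % rs_nblocks, wrapping around the end
of the relation. A throwaway example with made-up numbers (not part of
the patch):

#include <stdio.h>

int
main(void)
{
	unsigned	nblocks = 10;	/* rs_nblocks */
	unsigned	startblock = 7; /* rs_startblock */
	unsigned	numblocks = 5;	/* rs_numblocks, from heap_setscanlimits() */

	/* last block of the limited range, wrapping around the end of the rel */
	printf("backward scan starts at block %u\n",
		   (startblock + numblocks - 1) % nblocks);		/* prints block 1 */
	return 0;
}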

From 38d83796ff3f3060a8dcb244c85864279f080450 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Wed, 30 Nov 2022 10:20:30 -0500
Subject: [PATCH v3 1/7] Add no movement scan helper

The ``no movement'' scan direction case can be handled up front in
heapgettup() and heapgettup_pagemode(). Refactor it into its own helper
function to improve readability.
---
 src/backend/access/heap/heapam.c | 118 +++++++++++++++----------------
 1 file changed, 56 insertions(+), 62 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 747db50376..f15296a67a 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -484,6 +484,46 @@ heapgetpage(TableScanDesc sscan, BlockNumber block)
 	scan->rs_ntuples = ntup;
 }
 
+/*
+ * ``no movement'' scan direction: refetch prior tuple
+ */
+static inline void
+heapgettup_no_movement(HeapScanDesc scan)
+{
+	ItemId		lpp;
+	OffsetNumber lineoff;
+	BlockNumber block;
+	Page		page;
+	HeapTuple	tuple = &(scan->rs_ctup);
+
+	/* The scan must be init'd for there to be a current tuple (rs_ctup) */
+	Assert(scan->rs_inited);
+
+	/* Read the previously returned tuple's block back in, if needed */
+	block = ItemPointerGetBlockNumber(&(tuple->t_self));
+	if (block != scan->rs_cblock)
+		heapgetpage((TableScanDesc) scan, block);
+
+	/* Since the tuple was previously fetched, needn't lock page here */
+	page = BufferGetPage(scan->rs_cbuf);
+	TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
+	lineoff = scan->rs_cindex;
+	lpp = PageGetItemId(page, lineoff);
+	Assert(ItemIdIsNormal(lpp));
+
+	tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
+	tuple->t_len = ItemIdGetLength(lpp);
+
+	/* check that rs_cindex is in sync if in pagemode */
+	Assert(!(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE) ||
+			(scan->rs_cindex < scan->rs_ntuples));
+
+	Assert(!(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE) ||
+			(lineoff == scan->rs_vistuples[scan->rs_cindex]));
+
+	return;
+}
+
 /* ----------------
  *		heapgettup - fetch next heap tuple
  *
@@ -524,6 +564,12 @@ heapgettup(HeapScanDesc scan,
 	int			linesleft;
 	ItemId		lpp;
 
+	if (unlikely(ScanDirectionIsNoMovement(dir)))
+	{
+		heapgettup_no_movement(scan);
+		return;
+	}
+
 	/*
 	 * calculate next starting lineoff, given scan direction
 	 */
@@ -584,8 +630,9 @@ heapgettup(HeapScanDesc scan,
 
 		linesleft = lines - lineoff + 1;
 	}
-	else if (backward)
+	else
 	{
+		Assert(backward);
 		/* backward parallel scan not supported */
 		Assert(scan->rs_base.rs_parallel == NULL);
 
@@ -654,34 +701,6 @@ heapgettup(HeapScanDesc scan,
 
 		linesleft = lineoff;
 	}
-	else
-	{
-		/*
-		 * ``no movement'' scan direction: refetch prior tuple
-		 */
-		if (!scan->rs_inited)
-		{
-			Assert(!BufferIsValid(scan->rs_cbuf));
-			tuple->t_data = NULL;
-			return;
-		}
-
-		block = ItemPointerGetBlockNumber(&(tuple->t_self));
-		if (block != scan->rs_cblock)
-			heapgetpage((TableScanDesc) scan, block);
-
-		/* Since the tuple was previously fetched, needn't lock page here */
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-		lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
-		lpp = PageGetItemId(page, lineoff);
-		Assert(ItemIdIsNormal(lpp));
-
-		tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
-		tuple->t_len = ItemIdGetLength(lpp);
-
-		return;
-	}
 
 	/*
 	 * advance the scan until we find a qualifying tuple or run out of stuff
@@ -862,6 +881,12 @@ heapgettup_pagemode(HeapScanDesc scan,
 	int			linesleft;
 	ItemId		lpp;
 
+	if (unlikely(ScanDirectionIsNoMovement(dir)))
+	{
+		heapgettup_no_movement(scan);
+		return;
+	}
+
 	/*
 	 * calculate next starting lineindex, given scan direction
 	 */
@@ -919,8 +944,9 @@ heapgettup_pagemode(HeapScanDesc scan,
 
 		linesleft = lines - lineindex;
 	}
-	else if (backward)
+	else
 	{
+		Assert(backward);
 		/* backward parallel scan not supported */
 		Assert(scan->rs_base.rs_parallel == NULL);
 
@@ -979,38 +1005,6 @@ heapgettup_pagemode(HeapScanDesc scan,
 
 		linesleft = lineindex + 1;
 	}
-	else
-	{
-		/*
-		 * ``no movement'' scan direction: refetch prior tuple
-		 */
-		if (!scan->rs_inited)
-		{
-			Assert(!BufferIsValid(scan->rs_cbuf));
-			tuple->t_data = NULL;
-			return;
-		}
-
-		block = ItemPointerGetBlockNumber(&(tuple->t_self));
-		if (block != scan->rs_cblock)
-			heapgetpage((TableScanDesc) scan, block);
-
-		/* Since the tuple was previously fetched, needn't lock page here */
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-		lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
-		lpp = PageGetItemId(page, lineoff);
-		Assert(ItemIdIsNormal(lpp));
-
-		tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
-		tuple->t_len = ItemIdGetLength(lpp);
-
-		/* check that rs_cindex is in sync */
-		Assert(scan->rs_cindex < scan->rs_ntuples);
-		Assert(lineoff == scan->rs_vistuples[scan->rs_cindex]);
-
-		return;
-	}
 
 	/*
 	 * advance the scan until we find a qualifying tuple or run out of stuff
-- 
2.34.1

From 4d63c4942028d96adb431d4cd6c35330ed6e76f0 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Wed, 30 Nov 2022 12:08:05 -0500
Subject: [PATCH v3 4/7] Streamline heapgettup*() for refactor

Flip the initial branching in heapgettup() and heapgettup_pagemode() to
test scan->rs_inited first rather than the scan direction, making it
easier to extract helper functions. Also, remove unneeded local
variables.
---
 src/backend/access/heap/heapam.c | 168 ++++++++++---------------------
 src/include/access/heapam.h      |   6 +-
 2 files changed, 58 insertions(+), 116 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 142bcc0a1c..a4365beee1 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -610,7 +610,6 @@ heapgettup(HeapScanDesc scan,
 	BlockNumber block;
 	bool		finished;
 	Page		page;
-	int			lines;
 	OffsetNumber lineoff;
 	int			linesleft;
 	ItemId		lpp;
@@ -621,84 +620,57 @@ heapgettup(HeapScanDesc scan,
 		return;
 	}
 
-	if (ScanDirectionIsForward(dir))
+	if (!scan->rs_inited)
 	{
-		if (!scan->rs_inited)
-		{
-			block = heapgettup_initial_page(scan, dir);
+		block = heapgettup_initial_page(scan, dir);
 
-			if (block == InvalidBlockNumber)
-			{
-				Assert(!BufferIsValid(scan->rs_cbuf));
-				tuple->t_data = NULL;
-				return;
-			}
-			heapgetpage((TableScanDesc) scan, block);
-			lineoff = FirstOffsetNumber;
-		}
-		else
+		if (block == InvalidBlockNumber)
 		{
-			/* continue from previously returned page/tuple */
-			block = scan->rs_cblock; /* current page */
-			lineoff =			/* next offnum */
-				OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
+			Assert(!BufferIsValid(scan->rs_cbuf));
+			tuple->t_data = NULL;
+			return;
 		}
 
-		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+		heapgetpage((TableScanDesc) scan, block);
 
+		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
 		page = BufferGetPage(scan->rs_cbuf);
 		TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-		lines = PageGetMaxOffsetNumber(page);
-		/* block and lineoff now reference the physically next tid */
 
-		linesleft = lines - lineoff + 1;
+		linesleft = PageGetMaxOffsetNumber((Page) page) - FirstOffsetNumber + 1;
+
+		if (ScanDirectionIsForward(dir))
+			lineoff = FirstOffsetNumber;
+		else
+			lineoff = (OffsetNumber) linesleft;
 	}
 	else
 	{
-		Assert(backward);
-
-		if (!scan->rs_inited)
-		{
-			block = heapgettup_initial_page(scan, dir);
+		block = scan->rs_cblock; /* current page */
 
-			if (block == InvalidBlockNumber)
-			{
-				Assert(!BufferIsValid(scan->rs_cbuf));
-				tuple->t_data = NULL;
-				return;
-			}
+		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
 
-			heapgetpage((TableScanDesc) scan, block);
-			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+		page = BufferGetPage(scan->rs_cbuf);
+		TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
 
-			page = BufferGetPage(scan->rs_cbuf);
-			TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-			lines = PageGetMaxOffsetNumber(page);
-			lineoff = lines;	/* final offnum */
+		if (ScanDirectionIsForward(dir))
+		{
+			lineoff = OffsetNumberNext(scan->rs_cindex);
+			/* block and lineoff now reference the physically next tid */
+			linesleft = PageGetMaxOffsetNumber(page) - lineoff + 1;
 		}
 		else
 		{
-			/* continue from previously returned page/tuple */
-			block = scan->rs_cblock; /* current page */
-			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
-
-			page = BufferGetPage(scan->rs_cbuf);
-			TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-			lines = PageGetMaxOffsetNumber(page);
-
 			/*
 			 * The previous returned tuple may have been vacuumed since the
 			 * previous scan when we use a non-MVCC snapshot, so we must
 			 * re-establish the lineoff <= PageGetMaxOffsetNumber(page)
 			 * invariant
 			 */
-			lineoff =			/* previous offnum */
-				Min(lines,
-					OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self))));
+			lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_cindex));
+			/* block and lineoff now reference the physically previous tid */
+			linesleft = lineoff;
 		}
-		/* block and lineoff now reference the physically previous tid */
-
-		linesleft = lineoff;
 	}
 
 	/*
@@ -743,6 +715,7 @@ heapgettup(HeapScanDesc scan,
 				if (valid)
 				{
 					LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+					scan->rs_cindex = lineoff;
 					return;
 				}
 			}
@@ -835,12 +808,11 @@ heapgettup(HeapScanDesc scan,
 
 		page = BufferGetPage(scan->rs_cbuf);
 		TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-		lines = PageGetMaxOffsetNumber(page);
-		linesleft = lines;
+		linesleft = PageGetMaxOffsetNumber(page);
 		if (backward)
 		{
-			lineoff = lines;
-			lpp = PageGetItemId(page, lines);
+			lineoff = linesleft;
+			lpp = PageGetItemId(page, linesleft);
 		}
 		else
 		{
@@ -874,7 +846,6 @@ heapgettup_pagemode(HeapScanDesc scan,
 	BlockNumber block;
 	bool		finished;
 	Page		page;
-	int			lines;
 	int			lineindex;
 	OffsetNumber lineoff;
 	int			linesleft;
@@ -886,73 +857,41 @@ heapgettup_pagemode(HeapScanDesc scan,
 		return;
 	}
 
-	/*
-	 * calculate next starting lineindex, given scan direction
-	 */
-	if (ScanDirectionIsForward(dir))
+	if (!scan->rs_inited)
 	{
-		if (!scan->rs_inited)
-		{
-			block = heapgettup_initial_page(scan, dir);
+		block = heapgettup_initial_page(scan, dir);
 
-			if (block == InvalidBlockNumber)
-			{
-				Assert(!BufferIsValid(scan->rs_cbuf));
-				tuple->t_data = NULL;
-				return;
-			}
-
-			heapgetpage((TableScanDesc) scan, block);
-			lineindex = 0;
-		}
-		else
+		if (block == InvalidBlockNumber)
 		{
-			/* continue from previously returned page/tuple */
-			block = scan->rs_cblock; /* current page */
-			lineindex = scan->rs_cindex + 1;
+			Assert(!BufferIsValid(scan->rs_cbuf));
+			tuple->t_data = NULL;
+			return;
 		}
 
+		heapgetpage((TableScanDesc) scan, block);
 		page = BufferGetPage(scan->rs_cbuf);
 		TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-		lines = scan->rs_ntuples;
-		/* block and lineindex now reference the next visible tid */
-
-		linesleft = lines - lineindex;
+		linesleft = scan->rs_ntuples;
+		lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
 	}
 	else
 	{
-		Assert(backward);
-
-		if (!scan->rs_inited)
-		{
-			block = heapgettup_initial_page(scan, dir);
+		/* continue from previously returned page/tuple */
+		block = scan->rs_cblock; /* current page */
 
-			if (block == InvalidBlockNumber)
-			{
-				Assert(!BufferIsValid(scan->rs_cbuf));
-				tuple->t_data = NULL;
-				return;
-			}
+		page = BufferGetPage(scan->rs_cbuf);
+		TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
 
-			heapgetpage((TableScanDesc) scan, block);
-			page = BufferGetPage(scan->rs_cbuf);
-			TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-			lines = scan->rs_ntuples;
-			lineindex = lines - 1;
-		}
+		lineindex = scan->rs_cindex + dir;
+		if (ScanDirectionIsForward(dir))
+			linesleft = scan->rs_ntuples - lineindex;
 		else
-		{
-			/* continue from previously returned page/tuple */
-			block = scan->rs_cblock; /* current page */
-			page = BufferGetPage(scan->rs_cbuf);
-			TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-			lines = scan->rs_ntuples;
-			lineindex = scan->rs_cindex - 1;
-		}
-		/* block and lineindex now reference the previous visible tid */
-
-		linesleft = lineindex + 1;
+			linesleft = scan->rs_cindex;
 	}
+	/*
+	 * block and lineindex now reference the next or previous visible tid
+	 */
+
 
 	/*
 	 * advance the scan until we find a qualifying tuple or run out of stuff
@@ -1066,10 +1005,9 @@ heapgettup_pagemode(HeapScanDesc scan,
 
 		page = BufferGetPage(scan->rs_cbuf);
 		TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-		lines = scan->rs_ntuples;
-		linesleft = lines;
+		linesleft = scan->rs_ntuples;
 		if (backward)
-			lineindex = lines - 1;
+			lineindex = linesleft - 1;
 		else
 			lineindex = 0;
 	}
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 810baaf9d0..8db8ae7624 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -71,8 +71,12 @@ typedef struct HeapScanDescData
 	 */
 	ParallelBlockTableScanWorkerData *rs_parallelworkerdata;
 
+	/*
+	 * current tuple's index in vistuples or current lineoff in page
+	 */
+	int			rs_cindex;
+
 	/* these fields only used in page-at-a-time mode and for bitmap scans */
-	int			rs_cindex;		/* current tuple's index in vistuples */
 	int			rs_ntuples;		/* number of visible tuples on page */
 	OffsetNumber rs_vistuples[MaxHeapTuplesPerPage];	/* their offsets */
 }			HeapScanDescData;
-- 
2.34.1

From 5ed8a30fcb49779bf874d5b56e4bd5bf9189a00d Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Wed, 30 Nov 2022 15:38:44 -0500
Subject: [PATCH v3 7/7] Refactor heapgettup() and heapgettup_pagemode()

---
 src/backend/access/heap/heapam.c | 252 ++++++++++++-------------------
 1 file changed, 93 insertions(+), 159 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f1081ab529..8900490f2c 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -732,13 +732,10 @@ heapgettup(HeapScanDesc scan,
 		   ScanKey key)
 {
 	HeapTuple	tuple = &(scan->rs_ctup);
-	Snapshot	snapshot = scan->rs_base.rs_snapshot;
-	bool		backward = ScanDirectionIsBackward(dir);
 	BlockNumber block;
 	Page		page;
 	OffsetNumber lineoff;
 	int			linesleft;
-	ItemId		lpp;
 
 	if (unlikely(ScanDirectionIsNoMovement(dir)))
 	{
@@ -750,32 +747,32 @@ heapgettup(HeapScanDesc scan,
 	{
 		block = heapgettup_initial_page(scan, dir);
 
-		if (block == InvalidBlockNumber)
-		{
-			Assert(!BufferIsValid(scan->rs_cbuf));
-			tuple->t_data = NULL;
-			return;
-		}
-
-		heapgetpage((TableScanDesc) scan, block);
-		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
-		page = heapgettup_start_page(scan, block, dir, &linesleft, &lineoff);
+		/*
+		 * The returned block is InvalidBlockNumber if the relation is empty
+		 * or, for a parallel scan, if other workers have already finished it.
+		 * Either way, ensure that we are not sitting on a valid buffer.
+		 */
+		Assert(block != InvalidBlockNumber || !BufferIsValid(scan->rs_cbuf));
 	}
 	else
 	{
 		block = scan->rs_cblock; /* current page */
-
 		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
 		page = heapgettup_continue_page(scan, block, dir, &linesleft, &lineoff);
+		goto continue_page;
 	}
 
 	/*
 	 * advance the scan until we find a qualifying tuple or run out of stuff
 	 * to scan
 	 */
-	lpp = PageGetItemId(page, lineoff);
-	for (;;)
+	while (block != InvalidBlockNumber)
 	{
+		heapgetpage((TableScanDesc) scan, block);
+		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+		page = heapgettup_start_page(scan, block, dir, &linesleft, &lineoff);
+	continue_page:
+
 		/*
 		 * Only continue scanning the page while we have lines left.
 		 *
@@ -783,53 +780,40 @@ heapgettup(HeapScanDesc scan,
 		 * PageGetMaxOffsetNumber(); both for forward scans when we resume the
 		 * table scan, and for when we start scanning a new page.
 		 */
-		while (linesleft > 0)
+		for (; linesleft > 0; linesleft--, lineoff += dir)
 		{
-			if (ItemIdIsNormal(lpp))
-			{
-				bool		valid;
+			bool	visible;
+			ItemId lpp = PageGetItemId(page, lineoff);
 
-				tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
-				tuple->t_len = ItemIdGetLength(lpp);
-				ItemPointerSet(&(tuple->t_self), block, lineoff);
-
-				/*
-				 * if current tuple qualifies, return it.
-				 */
-				valid = HeapTupleSatisfiesVisibility(tuple,
-													 snapshot,
-													 scan->rs_cbuf);
+			if (!ItemIdIsNormal(lpp))
+				continue;
 
-				HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
-													tuple, scan->rs_cbuf,
-													snapshot);
+			tuple->t_data = (HeapTupleHeader) PageGetItem((Page) page, lpp);
+			tuple->t_len = ItemIdGetLength(lpp);
+			ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
 
-				if (valid && key != NULL)
-					valid = HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
-										nkeys, key);
+			/*
+			 * if current tuple qualifies, return it.
+			 * otherwise move to the next item on the block
+			 */
+			visible = HeapTupleSatisfiesVisibility(tuple,
+													scan->rs_base.rs_snapshot,
+													scan->rs_cbuf);
+
+			HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
+												tuple, scan->rs_cbuf,
+												scan->rs_base.rs_snapshot);
+
+			if (!visible)
+				continue;
 
-				if (valid)
-				{
-					LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
-					scan->rs_cindex = lineoff;
-					return;
-				}
-			}
+			if (key && !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
+						nkeys, key))
+				continue;
 
-			/*
-			 * otherwise move to the next item on the page
-			 */
-			--linesleft;
-			if (backward)
-			{
-				--lpp;			/* move back in this page's ItemId array */
-				--lineoff;
-			}
-			else
-			{
-				++lpp;			/* move forward in this page's ItemId array */
-				++lineoff;
-			}
+			LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
+			scan->rs_cindex = lineoff;
+			return;
 		}
 
 		/*
@@ -839,39 +823,16 @@ heapgettup(HeapScanDesc scan,
 		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 
 		block = heapgettup_advance_page(scan, block, dir);
+	}
 
-		/*
-		 * return NULL if we've exhausted all the pages
-		 */
-		if (block == InvalidBlockNumber)
-		{
-			if (BufferIsValid(scan->rs_cbuf))
-				ReleaseBuffer(scan->rs_cbuf);
-			scan->rs_cbuf = InvalidBuffer;
-			scan->rs_cblock = InvalidBlockNumber;
-			tuple->t_data = NULL;
-			scan->rs_inited = false;
-			return;
-		}
-
-		heapgetpage((TableScanDesc) scan, block);
-
-		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+	/* end of scan */
+	if (BufferIsValid(scan->rs_cbuf))
+		ReleaseBuffer(scan->rs_cbuf);
 
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-		linesleft = PageGetMaxOffsetNumber(page);
-		if (backward)
-		{
-			lineoff = linesleft;
-			lpp = PageGetItemId(page, linesleft);
-		}
-		else
-		{
-			lineoff = FirstOffsetNumber;
-			lpp = PageGetItemId(page, FirstOffsetNumber);
-		}
-	}
+	scan->rs_cbuf = InvalidBuffer;
+	scan->rs_cblock = InvalidBlockNumber;
+	tuple->t_data = NULL;
+	scan->rs_inited = false;
 }
 
 /* ----------------
@@ -894,13 +855,10 @@ heapgettup_pagemode(HeapScanDesc scan,
 					ScanKey key)
 {
 	HeapTuple	tuple = &(scan->rs_ctup);
-	bool		backward = ScanDirectionIsBackward(dir);
 	BlockNumber block;
 	Page		page;
 	int			lineindex;
-	OffsetNumber lineoff;
 	int			linesleft;
-	ItemId		lpp;
 
 	if (unlikely(ScanDirectionIsNoMovement(dir)))
 	{
@@ -912,18 +870,12 @@ heapgettup_pagemode(HeapScanDesc scan,
 	{
 		block = heapgettup_initial_page(scan, dir);
 
-		if (block == InvalidBlockNumber)
-		{
-			Assert(!BufferIsValid(scan->rs_cbuf));
-			tuple->t_data = NULL;
-			return;
-		}
-
-		heapgetpage((TableScanDesc) scan, block);
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-		linesleft = scan->rs_ntuples;
-		lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
+		/*
+		 * The returned block is InvalidBlockNumber if the relation is empty
+		 * or, for a parallel scan, if other workers have already finished it.
+		 * Either way, ensure that we are not sitting on a valid buffer.
+		 */
+		Assert(block != InvalidBlockNumber || !BufferIsValid(scan->rs_cbuf));
 	}
 	else
 	{
@@ -938,84 +890,66 @@ heapgettup_pagemode(HeapScanDesc scan,
 			linesleft = scan->rs_ntuples - lineindex;
 		else
 			linesleft = scan->rs_cindex;
-	}
-	/*
-	 * block and lineindex now reference the next or previous visible tid
-	 */
+		/* block and lineindex now reference the next or previous visible tid */
 
+		goto continue_page;
+	}
 
 	/*
 	 * advance the scan until we find a qualifying tuple or run out of stuff
 	 * to scan
 	 */
-	for (;;)
+	while (block != InvalidBlockNumber)
 	{
-		while (linesleft > 0)
+		heapgetpage((TableScanDesc) scan, block);
+		page = BufferGetPage(scan->rs_cbuf);
+		TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
+		linesleft = scan->rs_ntuples;
+		lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;
+
+		/* block and lineindex now reference the next or previous visible tid */
+
+	continue_page:
+
+		for (; linesleft > 0; linesleft--, lineindex += dir)
 		{
+			ItemId		lpp;
+			OffsetNumber lineoff;
+
 			lineoff = scan->rs_vistuples[lineindex];
 			lpp = PageGetItemId(page, lineoff);
 			Assert(ItemIdIsNormal(lpp));
 
-			tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
+			tuple->t_data = (HeapTupleHeader) PageGetItem((Page) page, lpp);
 			tuple->t_len = ItemIdGetLength(lpp);
 			ItemPointerSet(&(tuple->t_self), block, lineoff);
 
 			/*
-			 * if current tuple qualifies, return it.
-			 */
-			if (key != NULL)
-			{
-				bool		valid;
-
-				valid = HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
-									nkeys, key);
-				if (valid)
-				{
-					scan->rs_cindex = lineindex;
-					return;
-				}
-			}
-			else
-			{
-				scan->rs_cindex = lineindex;
-				return;
-			}
+			 * if current tuple qualifies, return it.
+			 * otherwise move to the next item on the block
+			 */
+			if (key && !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
+						nkeys, key))
+				continue;
 
-			/*
-			 * otherwise move to the next item on the page
-			 */
-			--linesleft;
-			if (backward)
-				--lineindex;
-			else
-				++lineindex;
+			scan->rs_cindex = lineindex;
+			return;
 		}
 
-		block = heapgettup_advance_page(scan, block, dir);
 		/*
-		 * return NULL if we've exhausted all the pages
+		 * if we get here, it means we've exhausted the items on this page and
+		 * it's time to move to the next.
 		 */
-		if (block == InvalidBlockNumber)
-		{
-			if (BufferIsValid(scan->rs_cbuf))
-				ReleaseBuffer(scan->rs_cbuf);
-			scan->rs_cbuf = InvalidBuffer;
-			scan->rs_cblock = InvalidBlockNumber;
-			tuple->t_data = NULL;
-			scan->rs_inited = false;
-			return;
-		}
-
-		heapgetpage((TableScanDesc) scan, block);
-
-		page = BufferGetPage(scan->rs_cbuf);
-		TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-		linesleft = scan->rs_ntuples;
-		if (backward)
-			lineindex = linesleft - 1;
-		else
-			lineindex = 0;
+		block = heapgettup_advance_page(scan, block, dir);
 	}
+
+	/* end of scan */
+	if (BufferIsValid(scan->rs_cbuf))
+		ReleaseBuffer(scan->rs_cbuf);
+	scan->rs_cbuf = InvalidBuffer;
+	scan->rs_cblock = InvalidBlockNumber;
+	tuple->t_data = NULL;
+	scan->rs_inited = false;
 }
 
 
-- 
2.34.1
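
A note on the control flow the 7/7 patch above introduces: when resuming
a scan we jump into the middle of the outer loop with goto
continue_page, so the per-line loop is written only once, and
lineoff/lineindex advance by the ScanDirection value (+1 forward, -1
backward). Below is a standalone toy example of the same shape -- not
PostgreSQL code, just the idiom, with all the heap details replaced by
plain ints and printf.

#include <stdio.h>
#include <stdbool.h>

int
main(void)
{
	int		page = 0;
	int		line = 0;
	bool	resuming = true;	/* pretend a prior call stopped at page 0, line 3 */

	if (resuming)
	{
		line = 3;				/* continue where we left off */
		goto continue_page;
	}

	for (; page < 2; page++)
	{
		line = 0;				/* fresh page: start at its first line */
continue_page:
		for (; line < 5; line++)
			printf("page %d, line %d\n", page, line);
	}
	return 0;
}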

From bc7332551da42cc1798b23f422c8d06f9057713a Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplage...@gmail.com>
Date: Wed, 30 Nov 2022 15:06:34 -0500
Subject: [PATCH v3 6/7] Add heapgettup_advance_page()

---
 src/backend/access/heap/heapam.c | 168 +++++++++++++------------------
 1 file changed, 72 insertions(+), 96 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f9b2d5cadb..f1081ab529 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -634,6 +634,74 @@ heapgettup_continue_page(HeapScanDesc scan, BlockNumber block, ScanDirection
 	return page;
 }
 
+static inline BlockNumber
+heapgettup_advance_page(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
+{
+	if (ScanDirectionIsBackward(dir))
+	{
+		if (block == scan->rs_startblock)
+			return InvalidBlockNumber;
+
+		if (scan->rs_numblocks != InvalidBlockNumber)
+		{
+			if (--scan->rs_numblocks == 0)
+				return InvalidBlockNumber;
+		}
+
+		if (block == 0)
+			block = scan->rs_nblocks;
+
+		block--;
+
+		return block;
+	}
+	else if (scan->rs_base.rs_parallel != NULL)
+	{
+		Assert(ScanDirectionIsForward(dir));
+
+		block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+				scan->rs_parallelworkerdata, (ParallelBlockTableScanDesc)
+				scan->rs_base.rs_parallel);
+
+		return block;
+	}
+	else
+	{
+		Assert(ScanDirectionIsForward(dir));
+
+		block++;
+
+		if (block >= scan->rs_nblocks)
+			block = 0;
+
+		if (block == scan->rs_startblock)
+			return InvalidBlockNumber;
+
+		if (scan->rs_numblocks != InvalidBlockNumber)
+		{
+			if (--scan->rs_numblocks == 0)
+				return InvalidBlockNumber;
+		}
+
+		/*
+		 * Report our new scan position for synchronization purposes. We
+		 * don't do that when moving backwards, however. That would just
+		 * mess up any other forward-moving scanners.
+		 *
+		 * Note: we do this before checking for end of scan so that the
+		 * final state of the position hint is back at the start of the
+		 * rel.  That's not strictly necessary, but otherwise when you run
+		 * the same query multiple times the starting position would shift
+		 * a little bit backwards on every invocation, which is confusing.
+		 * We don't guarantee any specific ordering in general, though.
+		 */
+		if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
+			ss_report_location(scan->rs_base.rs_rd, block);
+
+		return block;
+	}
+}
+
 /* ----------------
  *		heapgettup - fetch next heap tuple
  *
@@ -667,7 +735,6 @@ heapgettup(HeapScanDesc scan,
 	Snapshot	snapshot = scan->rs_base.rs_snapshot;
 	bool		backward = ScanDirectionIsBackward(dir);
 	BlockNumber block;
-	bool		finished;
 	Page		page;
 	OffsetNumber lineoff;
 	int			linesleft;
@@ -771,56 +838,12 @@ heapgettup(HeapScanDesc scan,
 		 */
 		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
 
-		/*
-		 * advance to next/prior page and detect end of scan
-		 */
-		if (backward)
-		{
-			finished = (block == scan->rs_startblock) ||
-				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
-			if (block == 0)
-				block = scan->rs_nblocks;
-			block--;
-		}
-		else if (scan->rs_base.rs_parallel != NULL)
-		{
-			ParallelBlockTableScanDesc pbscan =
-			(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
-			ParallelBlockTableScanWorker pbscanwork =
-			scan->rs_parallelworkerdata;
-
-			block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
-													 pbscanwork, pbscan);
-			finished = (block == InvalidBlockNumber);
-		}
-		else
-		{
-			block++;
-			if (block >= scan->rs_nblocks)
-				block = 0;
-			finished = (block == scan->rs_startblock) ||
-				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
-
-			/*
-			 * Report our new scan position for synchronization purposes. We
-			 * don't do that when moving backwards, however. That would just
-			 * mess up any other forward-moving scanners.
-			 *
-			 * Note: we do this before checking for end of scan so that the
-			 * final state of the position hint is back at the start of the
-			 * rel.  That's not strictly necessary, but otherwise when you run
-			 * the same query multiple times the starting position would shift
-			 * a little bit backwards on every invocation, which is confusing.
-			 * We don't guarantee any specific ordering in general, though.
-			 */
-			if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
-				ss_report_location(scan->rs_base.rs_rd, block);
-		}
+		block = heapgettup_advance_page(scan, block, dir);
 
 		/*
 		 * return NULL if we've exhausted all the pages
 		 */
-		if (finished)
+		if (block == InvalidBlockNumber)
 		{
 			if (BufferIsValid(scan->rs_cbuf))
 				ReleaseBuffer(scan->rs_cbuf);
@@ -873,7 +896,6 @@ heapgettup_pagemode(HeapScanDesc scan,
 	HeapTuple	tuple = &(scan->rs_ctup);
 	bool		backward = ScanDirectionIsBackward(dir);
 	BlockNumber block;
-	bool		finished;
 	Page		page;
 	int			lineindex;
 	OffsetNumber lineoff;
@@ -969,57 +991,11 @@ heapgettup_pagemode(HeapScanDesc scan,
 				++lineindex;
 		}
 
-		/*
-		 * if we get here, it means we've exhausted the items on this page and
-		 * it's time to move to the next.
-		 */
-		if (backward)
-		{
-			finished = (block == scan->rs_startblock) ||
-				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
-			if (block == 0)
-				block = scan->rs_nblocks;
-			block--;
-		}
-		else if (scan->rs_base.rs_parallel != NULL)
-		{
-			ParallelBlockTableScanDesc pbscan =
-			(ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
-			ParallelBlockTableScanWorker pbscanwork =
-			scan->rs_parallelworkerdata;
-
-			block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
-													 pbscanwork, pbscan);
-			finished = (block == InvalidBlockNumber);
-		}
-		else
-		{
-			block++;
-			if (block >= scan->rs_nblocks)
-				block = 0;
-			finished = (block == scan->rs_startblock) ||
-				(scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false);
-
-			/*
-			 * Report our new scan position for synchronization purposes. We
-			 * don't do that when moving backwards, however. That would just
-			 * mess up any other forward-moving scanners.
-			 *
-			 * Note: we do this before checking for end of scan so that the
-			 * final state of the position hint is back at the start of the
-			 * rel.  That's not strictly necessary, but otherwise when you run
-			 * the same query multiple times the starting position would shift
-			 * a little bit backwards on every invocation, which is confusing.
-			 * We don't guarantee any specific ordering in general, though.
-			 */
-			if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
-				ss_report_location(scan->rs_base.rs_rd, block);
-		}
-
+		block = heapgettup_advance_page(scan, block, dir);
 		/*
 		 * return NULL if we've exhausted all the pages
 		 */
-		if (finished)
+		if (block == InvalidBlockNumber)
 		{
 			if (BufferIsValid(scan->rs_cbuf))
 				ReleaseBuffer(scan->rs_cbuf);
-- 
2.34.1
