From 026ff14da2689bb6e6603eb351823a806840c2e9 Mon Sep 17 00:00:00 2001
From: Masahiko Sawada <sawada.mshk@gmail.com>
Date: Mon, 25 Mar 2024 16:09:45 +0900
Subject: [PATCH v79 6/6] Address review comments on vacuum integration.

---
 src/backend/access/heap/vacuumlazy.c  | 36 ++++++++++++++++-----------
 src/backend/commands/vacuum.c         |  5 ++--
 src/backend/commands/vacuumparallel.c | 22 ++++++++--------
 3 files changed, 36 insertions(+), 27 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 82c5a2c690..a0ff7526f3 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -3,7 +3,7 @@
  * vacuumlazy.c
  *	  Concurrent ("lazy") vacuuming.
  *
- * The major space usage for vacuuming is TidStore, a storage for dead TIDs
+ * The major space usage for vacuuming is TID store, a storage for dead TIDs
  * that are to be removed from indexes.  We want to ensure we can vacuum even
  * the very largest relations with finite memory space usage.  To do that, we
  * set upper bounds on the maximum memory that can be used for keeping track
@@ -11,7 +11,7 @@
  *
  * We are willing to use at most maintenance_work_mem (or perhaps
  * autovacuum_work_mem) memory space to keep track of dead TIDs.  If the
- * TidStore is full, we must call lazy_vacuum to vacuum indexes (and to vacuum
+ * TID store is full, we must call lazy_vacuum to vacuum indexes (and to vacuum
  * the pages that we've pruned). This frees up the memory space dedicated to
- * to store dead TIDs.
+ * storing dead TIDs.
  *
@@ -939,7 +939,7 @@ lazy_scan_heap(LVRelState *vacrel)
 
 		/*
 		 * If we didn't get the cleanup lock, we can still collect LP_DEAD
-		 * items in the dead_items for later vacuuming, count live and
+		 * items in the dead_items area for later vacuuming, count live and
 		 * recently dead tuples for vacuum logging, and determine if this
 		 * block could later be truncated. If we encounter any xid/mxids that
 		 * require advancing the relfrozenxid/relminxid, we'll have to wait
@@ -967,9 +967,9 @@ lazy_scan_heap(LVRelState *vacrel)
 		 * Like lazy_scan_noprune(), lazy_scan_prune() will count
 		 * recently_dead_tuples and live tuples for vacuum logging, determine
 		 * if the block can later be truncated, and accumulate the details of
-		 * remaining LP_DEAD line pointers on the page in the dead_items.
+		 * remaining LP_DEAD line pointers on the page into the dead_items area.
 		 * These dead items include those pruned by lazy_scan_prune() as well
-		 * we line pointers previously marked LP_DEAD.
+		 * as line pointers previously marked LP_DEAD.
 		 */
 		if (got_cleanup_lock)
 			lazy_scan_prune(vacrel, buf, blkno, page,
@@ -2459,8 +2459,9 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)
 			vacuumed_pages == vacrel->lpdead_item_pages));
 
 	ereport(DEBUG2,
-			(errmsg("table \"%s\": removed " INT64_FORMAT "dead item identifiers in %u pages",
-					vacrel->relname, vacrel->dead_items_info->num_items, vacuumed_pages)));
+			(errmsg("table \"%s\": removed %lld dead item identifiers in %u pages",
+					vacrel->relname, (long long) vacrel->dead_items_info->num_items,
+					vacuumed_pages)));
 
 	/* Revert to the previous phase information for error traceback */
 	restore_vacuum_error_info(vacrel, &saved_err_info);
@@ -3102,8 +3103,8 @@ count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected)
 }
 
 /*
- * Allocate dead_items (either using palloc, or in dynamic shared memory).
- * Sets dead_items in vacrel for caller.
+ * Allocate dead_items and dead_items_info (either using palloc, or in dynamic
+ * shared memory). Sets both in vacrel for caller.
  *
  * Also handles parallel initialization as part of allocating dead_items in
  * DSM when required.
@@ -3114,7 +3115,7 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
 	VacDeadItemsInfo *dead_items_info;
 	size_t			vac_work_mem = AmAutoVacuumWorkerProcess() &&
 		autovacuum_work_mem != -1 ?
-		autovacuum_work_mem * 1024L : maintenance_work_mem * 1024L;
+		autovacuum_work_mem : maintenance_work_mem;
 
 	/*
 	 * Initialize state for a parallel vacuum.  As of now, only one worker can
@@ -3145,7 +3146,10 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
 											   vacrel->verbose ? INFO : DEBUG2,
 											   vacrel->bstrategy);
 
-		/* If parallel mode started, dead_items space is allocated in DSM */
+		/*
+		 * If parallel mode started, dead_items and dead_items_info spaces are
+		 * allocated in DSM.
+		 */
 		if (ParallelVacuumIsActive(vacrel))
 		{
 			vacrel->dead_items = parallel_vacuum_get_dead_items(vacrel->pvs,
@@ -3154,13 +3158,17 @@ dead_items_alloc(LVRelState *vacrel, int nworkers)
 		}
 	}
 
-	/* Serial VACUUM case */
-	vacrel->dead_items = TidStoreCreateLocal(vac_work_mem);
+	/*
+	 * Serial VACUUM case. Allocate both dead_items and dead_items_info
+	 * locally.
+	 */
 
 	dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));
-	dead_items_info->max_bytes = vac_work_mem;
+	dead_items_info->max_bytes = vac_work_mem * 1024L;
 	dead_items_info->num_items = 0;
 	vacrel->dead_items_info = dead_items_info;
+
+	vacrel->dead_items = TidStoreCreateLocal(dead_items_info->max_bytes);
 }
 
 /*
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 72299b0838..b589279d49 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -2495,8 +2495,9 @@ vac_bulkdel_one_index(IndexVacuumInfo *ivinfo, IndexBulkDeleteResult *istat,
 							  (void *) dead_items);
 
 	ereport(ivinfo->message_level,
-			(errmsg("scanned index \"%s\" to remove " INT64_FORMAT " row versions",
-					RelationGetRelationName(ivinfo->index), dead_items_info->num_items)));
+			(errmsg("scanned index \"%s\" to remove %lld row versions",
+					RelationGetRelationName(ivinfo->index),
+					(long long) dead_items_info->num_items)));
 
 	return istat;
 }
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 719055a734..233a111111 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -45,11 +45,10 @@
  * use small integers.
  */
 #define PARALLEL_VACUUM_KEY_SHARED			1
-/* 2 was PARALLEL_VACUUM_KEY_DEAD_ITEMS */
-#define PARALLEL_VACUUM_KEY_QUERY_TEXT		3
-#define PARALLEL_VACUUM_KEY_BUFFER_USAGE	4
-#define PARALLEL_VACUUM_KEY_WAL_USAGE		5
-#define PARALLEL_VACUUM_KEY_INDEX_STATS		6
+#define PARALLEL_VACUUM_KEY_QUERY_TEXT		2
+#define PARALLEL_VACUUM_KEY_BUFFER_USAGE	3
+#define PARALLEL_VACUUM_KEY_WAL_USAGE		4
+#define PARALLEL_VACUUM_KEY_INDEX_STATS		5
 
 /*
  * Shared information among parallel workers.  So this is allocated in the DSM
@@ -365,10 +364,6 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 	shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_INDEX_STATS, indstats);
 	pvs->indstats = indstats;
 
-	/* Prepare DSA space for dead items */
-	dead_items = TidStoreCreateShared(vac_work_mem, LWTRANCHE_PARALLEL_VACUUM_DSA);
-	pvs->dead_items = dead_items;
-
 	/* Prepare shared information */
 	shared = (PVShared *) shm_toc_allocate(pcxt->toc, est_shared_len);
 	MemSet(shared, 0, est_shared_len);
@@ -378,9 +373,14 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 		(nindexes_mwm > 0) ?
 		maintenance_work_mem / Min(parallel_workers, nindexes_mwm) :
 		maintenance_work_mem;
-	shared->dead_items_dsa_handle = dsa_get_handle(TidStoreGetDSA(dead_items));
+	shared->dead_items_info.max_bytes = vac_work_mem * 1024L;
+
+	/* Prepare DSA space for dead items */
+	dead_items = TidStoreCreateShared(shared->dead_items_info.max_bytes,
+									  LWTRANCHE_PARALLEL_VACUUM_DSA);
+	pvs->dead_items = dead_items;
 	shared->dead_items_handle = TidStoreGetHandle(dead_items);
-	shared->dead_items_info.max_bytes = vac_work_mem;
+	shared->dead_items_dsa_handle = dsa_get_handle(TidStoreGetDSA(dead_items));
 
 	/* Use the same buffer size for all workers */
 	shared->ring_nbuffers = GetAccessStrategyBufferCount(bstrategy);
-- 
2.39.3

