From cca81d53f95706ed4af3c05795b8cf15ce3492f4 Mon Sep 17 00:00:00 2001
From: Peter Geoghegan <pg@bowt.ie>
Date: Tue, 9 Mar 2021 12:58:39 -0800
Subject: [PATCH v2 1/2] Don't consider newly inserted tuples in nbtree VACUUM.

Skip the full index scan during VACUUM for nbtree indexes in the case
where VACUUM never called btbulkdelete(), even when pg_class stats for
the index relation would be considered "stale" by the
vacuum_cleanup_index_scale_factor criteria.  Rely on ANALYZE instead,
which can be counted on to keep pg_class.reltuples up to date, per the
amvacuumcleanup contract.  Also remove the
vacuum_cleanup_index_scale_factor GUC/param in passing (though just
disable it on the Postgres 13 branch).

VACUUM will still scan the index despite never reaching btbulkdelete()
in one remaining case:  the case where a previous VACUUM operation is
known to have performed index page deletion of pages that have yet to
be placed in the free space map for recycling.
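
To illustrate, here is a minimal, hypothetical sketch (not the
committed code) of what _bt_vacuum_needs_cleanup() is left with once
the "stale stats" test is gone -- only the deleted-pages trigger
remains:

    /*
     * Condensed sketch only, assuming PostgreSQL's genam.h/bufmgr.h
     * declarations; the real function also reads the metapage and
     * handles pre-BTREE_NOVAC_VERSION indexes.
     */
    static bool
    bt_needs_cleanup_sketch(IndexVacuumInfo *info,
                            BlockNumber prev_num_delpages)
    {
        /*
         * Trigger cleanup when pages deleted by a previous VACUUM (but
         * not yet placed in the FSM) exceed 5% of the index.  Recycling
         * them is the one remaining reason to scan here.
         */
        if (prev_num_delpages > 0 &&
            prev_num_delpages > RelationGetNumberOfBlocks(info->index) / 20)
            return true;

        return false;
    }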

Backpatch to Postgres 13 due to an unanticipated interaction with the
autovacuum_vacuum_insert_threshold feature added by commit b07642db and
the "skip full scan" feature added by commit 857f9c36.  This interaction
has been tied to a regression with an append-only insert benchmark [1].
It is reasonable to expect a certain amount of overhead from vacuuming
that just sets visibility map bits, but it does not seem reasonable to
perform a full index scan in btvacuumcleanup() purely to set the
pg_class.reltuples stats in affected indexes.

There is another reason to backpatch: a bugfix commit tied to the
nbtree deduplication feature (commit 48e12913) taught nbtree VACUUM to
track IndexBulkDeleteResult.num_index_tuples using an inherently
approximate approach -- it counts index tuples, even though a single
posting list tuple can represent many heap TIDs.  This made sense --
getting an accurate TID count in the presence of posting list tuples
just isn't worth the cycles for a btvacuumcleanup()-only VACUUM.  But
btvacuumcleanup() should still indicate that its final num_index_tuples
value is just an estimate when its approximate approach to tracking
live tuples gets used.

Have btvacuumcleanup() acknowledge that its approach to counting is
approximate by setting IndexBulkDeleteResult.estimated_count to 'true'.
This prevents vacuumlazy.c from setting the index's pg_class.reltuples
to a value that significantly underestimates the number of live tuples
(at least in certain scenarios with large posting list tuples).  This is
the same approach that hashvacuumcleanup() has always taken.  Index AMs
have had the option of giving only approximate num_index_tuples
statistics since commit e57345975cf, which updated the relevant index AM
API.
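
For reference, here is a hedged sketch of the general amvacuumcleanup()
pattern being relied on (hypothetical AM code, not taken from this
patch): a cleanup pass that can only count index tuples, not TIDs,
still reports the total, but flags it as an estimate so that
vacuumlazy.c won't overwrite pg_class.reltuples with an underestimate:

    /* Assumes PostgreSQL's genam.h/bufmgr.h declarations */
    IndexBulkDeleteResult *
    amvacuumcleanup_sketch(IndexVacuumInfo *info,
                           IndexBulkDeleteResult *stats)
    {
        /* Do nothing when called as part of a plain ANALYZE */
        if (info->analyze_only)
            return stats;

        if (stats == NULL)
            stats = (IndexBulkDeleteResult *)
                palloc0(sizeof(IndexBulkDeleteResult));

        stats->num_pages = RelationGetNumberOfBlocks(info->index);
        /* num_index_tuples was derived without visiting every TID */
        stats->estimated_count = true;

        return stats;
    }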

[1] https://smalldatum.blogspot.com/2021/01/insert-benchmark-postgres-is-still.html

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Masahiko Sawada <sawada.mshk@gmail.com>
Discussion: https://postgr.es/m/CAD21AoA4WHthN5uU6+WScZ7+J_RcEjmcuH94qcoUPuB42ShXzg@mail.gmail.com
Backpatch: 13-, where autovacuum_vacuum_insert_threshold was added.
---
 src/include/access/nbtree.h                   |  5 +-
 src/include/access/nbtxlog.h                  |  1 -
 src/include/access/xlog_internal.h            |  2 +-
 src/include/miscadmin.h                       |  2 -
 src/backend/access/common/reloptions.c        |  9 ---
 src/backend/access/nbtree/nbtinsert.c         |  3 -
 src/backend/access/nbtree/nbtpage.c           | 40 ++++------
 src/backend/access/nbtree/nbtree.c            | 75 ++++++-------------
 src/backend/access/nbtree/nbtutils.c          |  2 -
 src/backend/access/nbtree/nbtxlog.c           |  2 +-
 src/backend/access/rmgrdesc/nbtdesc.c         |  5 +-
 src/backend/utils/init/globals.c              |  2 -
 src/backend/utils/misc/guc.c                  | 10 ---
 src/backend/utils/misc/postgresql.conf.sample |  3 -
 src/bin/psql/tab-complete.c                   |  4 +-
 doc/src/sgml/config.sgml                      | 40 ----------
 doc/src/sgml/ref/create_index.sgml            | 14 ----
 src/test/regress/expected/btree_index.out     | 29 -------
 src/test/regress/sql/btree_index.sql          | 19 -----
 19 files changed, 44 insertions(+), 223 deletions(-)

diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index b56b7b7868..c9d6000cbe 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -1067,8 +1067,6 @@ typedef struct BTOptions
 {
 	int32		varlena_header_;	/* varlena header (do not touch directly!) */
 	int			fillfactor;		/* page fill factor in percent (0..100) */
-	/* fraction of newly inserted tuples needed to trigger index cleanup */
-	float8		vacuum_cleanup_index_scale_factor;
 	bool		deduplicate_items;	/* Try to deduplicate items? */
 } BTOptions;
 
@@ -1171,8 +1169,7 @@ extern OffsetNumber _bt_findsplitloc(Relation rel, Page origpage,
  */
 extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
 							 bool allequalimage);
-extern void _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages,
-								 float8 num_heap_tuples);
+extern void _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages);
 extern void _bt_upgrademetapage(Page page);
 extern Buffer _bt_getroot(Relation rel, int access);
 extern Buffer _bt_gettrueroot(Relation rel);
diff --git a/src/include/access/nbtxlog.h b/src/include/access/nbtxlog.h
index 3df34fcda2..0f7731856b 100644
--- a/src/include/access/nbtxlog.h
+++ b/src/include/access/nbtxlog.h
@@ -54,7 +54,6 @@ typedef struct xl_btree_metadata
 	BlockNumber fastroot;
 	uint32		fastlevel;
 	uint32		last_cleanup_num_delpages;
-	float8		last_cleanup_num_heap_tuples;
 	bool		allequalimage;
 } xl_btree_metadata;
 
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index 8d09eaec93..b23e286406 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -31,7 +31,7 @@
 /*
  * Each page of XLOG file has a header like this:
  */
-#define XLOG_PAGE_MAGIC 0xD10A	/* can be used as WAL version indicator */
+#define XLOG_PAGE_MAGIC 0xD10B	/* can be used as WAL version indicator */
 
 typedef struct XLogPageHeaderData
 {
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 1bdc97e308..54693e047a 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -261,8 +261,6 @@ extern int64 VacuumPageDirty;
 extern int	VacuumCostBalance;
 extern bool VacuumCostActive;
 
-extern double vacuum_cleanup_index_scale_factor;
-
 
 /* in tcop/postgres.c */
 
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index c687d3ee9e..433e236722 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -461,15 +461,6 @@ static relopt_real realRelOpts[] =
 		},
 		0, -1.0, DBL_MAX
 	},
-	{
-		{
-			"vacuum_cleanup_index_scale_factor",
-			"Number of tuple inserts prior to index cleanup as a fraction of reltuples.",
-			RELOPT_KIND_BTREE,
-			ShareUpdateExclusiveLock
-		},
-		-1, 0.0, 1e10
-	},
 	/* list terminator */
 	{{NULL}}
 };
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 1edb9f9579..0bc86943eb 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -1332,8 +1332,6 @@ _bt_insertonpg(Relation rel,
 					xlmeta.fastroot = metad->btm_fastroot;
 					xlmeta.fastlevel = metad->btm_fastlevel;
 					xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
-					xlmeta.last_cleanup_num_heap_tuples =
-						metad->btm_last_cleanup_num_heap_tuples;
 					xlmeta.allequalimage = metad->btm_allequalimage;
 
 					XLogRegisterBuffer(2, metabuf,
@@ -2549,7 +2547,6 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
 		md.fastroot = rootblknum;
 		md.fastlevel = metad->btm_level;
 		md.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
-		md.last_cleanup_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples;
 		md.allequalimage = metad->btm_allequalimage;
 
 		XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata));
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index c09e492a5f..4a0578dff4 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -175,26 +175,15 @@ _bt_getmeta(Relation rel, Buffer metabuf)
  *		_bt_vacuum_needs_cleanup() to decide whether or not a btvacuumscan()
  *		call should go ahead for an entire VACUUM operation.
  *
- *		See btvacuumcleanup() and _bt_vacuum_needs_cleanup() for details of
- *		the two fields that we maintain here.
- *
- *		The information that we maintain for btvacuumcleanup() describes the
- *		state of the index (as well as the table it indexes) just _after_ the
- *		ongoing VACUUM operation.  The next _bt_vacuum_needs_cleanup() call
- *		will consider the information we saved for it during the next VACUUM
- *		operation (assuming that there will be no btbulkdelete() call during
- *		the next VACUUM operation -- if there is then the question of skipping
- *		btvacuumscan() doesn't even arise).
+ *		See btvacuumcleanup() and _bt_vacuum_needs_cleanup() for the
+ *		definition of num_delpages.
  */
 void
-_bt_set_cleanup_info(Relation rel, BlockNumber num_delpages,
-					 float8 num_heap_tuples)
+_bt_set_cleanup_info(Relation rel, BlockNumber num_delpages)
 {
 	Buffer		metabuf;
 	Page		metapg;
 	BTMetaPageData *metad;
-	bool		rewrite = false;
-	XLogRecPtr	recptr;
 
 	/*
 	 * On-disk compatibility note: The btm_last_cleanup_num_delpages metapage
@@ -209,21 +198,20 @@ _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages,
 	 * in reality there are only one or two.  The worst that can happen is
 	 * that there will be a call to btvacuumscan a little earlier, which will
 	 * set btm_last_cleanup_num_delpages to a sane value when we're called.
+	 *
+	 * Note also that the metapage's btm_last_cleanup_num_heap_tuples field is
+	 * no longer used as of PostgreSQL 14.  We set it to -1.0 on rewrite, just
+	 * to be consistent.
 	 */
 	metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
 	metapg = BufferGetPage(metabuf);
 	metad = BTPageGetMeta(metapg);
 
-	/* Always dynamically upgrade index/metapage when BTREE_MIN_VERSION */
-	if (metad->btm_version < BTREE_NOVAC_VERSION)
-		rewrite = true;
-	else if (metad->btm_last_cleanup_num_delpages != num_delpages)
-		rewrite = true;
-	else if (metad->btm_last_cleanup_num_heap_tuples != num_heap_tuples)
-		rewrite = true;
-
-	if (!rewrite)
+	/* Don't miss the chance to upgrade index/metapage when BTREE_MIN_VERSION */
+	if (metad->btm_version >= BTREE_NOVAC_VERSION &&
+		metad->btm_last_cleanup_num_delpages == num_delpages)
 	{
+		/* Usually means index continues to have num_delpages of 0 */
 		_bt_relbuf(rel, metabuf);
 		return;
 	}
@@ -240,13 +228,14 @@ _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages,
 
 	/* update cleanup-related information */
 	metad->btm_last_cleanup_num_delpages = num_delpages;
-	metad->btm_last_cleanup_num_heap_tuples = num_heap_tuples;
+	metad->btm_last_cleanup_num_heap_tuples = -1.0;
 	MarkBufferDirty(metabuf);
 
 	/* write wal record if needed */
 	if (RelationNeedsWAL(rel))
 	{
 		xl_btree_metadata md;
+		XLogRecPtr	recptr;
 
 		XLogBeginInsert();
 		XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
@@ -258,7 +247,6 @@ _bt_set_cleanup_info(Relation rel, BlockNumber num_delpages,
 		md.fastroot = metad->btm_fastroot;
 		md.fastlevel = metad->btm_fastlevel;
 		md.last_cleanup_num_delpages = num_delpages;
-		md.last_cleanup_num_heap_tuples = num_heap_tuples;
 		md.allequalimage = metad->btm_allequalimage;
 
 		XLogRegisterBufData(0, (char *) &md, sizeof(xl_btree_metadata));
@@ -443,7 +431,6 @@ _bt_getroot(Relation rel, int access)
 			md.fastroot = rootblkno;
 			md.fastlevel = 0;
 			md.last_cleanup_num_delpages = 0;
-			md.last_cleanup_num_heap_tuples = -1.0;
 			md.allequalimage = metad->btm_allequalimage;
 
 			XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata));
@@ -2632,7 +2619,6 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
 			xlmeta.fastroot = metad->btm_fastroot;
 			xlmeta.fastlevel = metad->btm_fastlevel;
 			xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
-			xlmeta.last_cleanup_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples;
 			xlmeta.allequalimage = metad->btm_allequalimage;
 
 			XLogRegisterBufData(4, (char *) &xlmeta, sizeof(xl_btree_metadata));
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 504f5bef17..fefdf3f955 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -789,11 +789,8 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 	Buffer		metabuf;
 	Page		metapg;
 	BTMetaPageData *metad;
-	BTOptions  *relopts;
-	float8		cleanup_scale_factor;
 	uint32		btm_version;
 	BlockNumber prev_num_delpages;
-	float8		prev_num_heap_tuples;
 
 	/*
 	 * Copy details from metapage to local variables quickly.
@@ -816,32 +813,8 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 	}
 
 	prev_num_delpages = metad->btm_last_cleanup_num_delpages;
-	prev_num_heap_tuples = metad->btm_last_cleanup_num_heap_tuples;
 	_bt_relbuf(info->index, metabuf);
 
-	/*
-	 * If the underlying table has received a sufficiently high number of
-	 * insertions since the last VACUUM operation that called btvacuumscan(),
-	 * then have the current VACUUM operation call btvacuumscan() now.  This
-	 * happens when the statistics are deemed stale.
-	 *
-	 * XXX: We should have a more principled way of determining what
-	 * "staleness" means. The  vacuum_cleanup_index_scale_factor GUC (and the
-	 * index-level storage param) seem hard to tune in a principled way.
-	 */
-	relopts = (BTOptions *) info->index->rd_options;
-	cleanup_scale_factor = (relopts &&
-							relopts->vacuum_cleanup_index_scale_factor >= 0)
-		? relopts->vacuum_cleanup_index_scale_factor
-		: vacuum_cleanup_index_scale_factor;
-
-	if (cleanup_scale_factor <= 0 ||
-		info->num_heap_tuples < 0 ||
-		prev_num_heap_tuples <= 0 ||
-		(info->num_heap_tuples - prev_num_heap_tuples) /
-		prev_num_heap_tuples >= cleanup_scale_factor)
-		return true;
-
 	/*
 	 * Trigger cleanup in rare cases where prev_num_delpages exceeds 5% of the
 	 * total size of the index.  We can reasonably expect (though are not
@@ -925,48 +898,46 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 
 		/*
 		 * Since we aren't going to actually delete any leaf items, there's no
-		 * need to go through all the vacuum-cycle-ID pushups here
+		 * need to go through all the vacuum-cycle-ID pushups here.
+		 *
+		 * Posting list tuples are a source of inaccuracy for cleanup-only
+		 * scans.  btvacuumscan() will assume that the number of index tuples
+		 * from each page can be used as num_index_tuples, even though
+		 * num_index_tuples is supposed to represent the number of TIDs in the
+		 * index.  This naive approach can underestimate the number of tuples
+		 * in the index significantly.
+		 *
+		 * We handle the problem by making num_index_tuples an estimate in
+		 * the cleanup-only case.
 		 */
 		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
 		btvacuumscan(info, stats, NULL, NULL, 0);
+		stats->estimated_count = true;
 	}
 
 	/*
 	 * By here, we know for sure that this VACUUM operation won't be skipping
-	 * its btvacuumscan() call.  Maintain the count of the current number of
-	 * heap tuples in the metapage.  Also maintain the num_delpages value.
-	 * This information will be used by _bt_vacuum_needs_cleanup() during
-	 * future VACUUM operations that don't need to call btbulkdelete().
+	 * its btvacuumscan() call.  Maintain the num_delpages value.  This
+	 * information will be used by _bt_vacuum_needs_cleanup() during future
+	 * VACUUM operations that don't need to call btbulkdelete().
 	 *
 	 * num_delpages is the number of deleted pages now in the index that were
 	 * not safe to place in the FSM to be recycled just yet.  We expect that
 	 * it will almost certainly be possible to place all of these pages in the
-	 * FSM during the next VACUUM operation.  That factor alone might cause
-	 * _bt_vacuum_needs_cleanup() to force the next VACUUM to proceed with a
-	 * btvacuumscan() call.
-	 *
-	 * Note: We must delay the _bt_set_cleanup_info() call until this late
-	 * stage of VACUUM (the btvacuumcleanup() phase), to keep num_heap_tuples
-	 * accurate.  The btbulkdelete()-time num_heap_tuples value is generally
-	 * just pg_class.reltuples for the heap relation _before_ VACUUM began.
-	 * In general cleanup info should describe the state of the index/table
-	 * _after_ VACUUM finishes.
+	 * FSM during the next VACUUM operation.  _bt_vacuum_needs_cleanup() will
+	 * force the next VACUUM to consider this before allowing btvacuumscan()
+	 * to be skipped entirely.  This should be rare -- cleanup-only VACUUMs
+	 * almost always manage to skip btvacuumscan() in practice.
 	 */
 	Assert(stats->pages_deleted >= stats->pages_free);
 	num_delpages = stats->pages_deleted - stats->pages_free;
-	_bt_set_cleanup_info(info->index, num_delpages, info->num_heap_tuples);
+	_bt_set_cleanup_info(info->index, num_delpages);
 
 	/*
 	 * It's quite possible for us to be fooled by concurrent page splits into
 	 * double-counting some index tuples, so disbelieve any total that exceeds
 	 * the underlying heap's count ... if we know that accurately.  Otherwise
 	 * this might just make matters worse.
-	 *
-	 * Posting list tuples are another source of inaccuracy.  Cleanup-only
-	 * btvacuumscan calls assume that the number of index tuples can be used
-	 * as num_index_tuples, even though num_index_tuples is supposed to
-	 * represent the number of TIDs in the index.  This naive approach can
-	 * underestimate the number of tuples in the index.
 	 */
 	if (!info->estimated_count)
 	{
@@ -1016,7 +987,6 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	 * pages in the index at the end of the VACUUM command.)
 	 */
 	stats->num_pages = 0;
-	stats->estimated_count = false;
 	stats->num_index_tuples = 0;
 	stats->pages_deleted = 0;
 	stats->pages_free = 0;
@@ -1421,7 +1391,10 @@ backtrack:
 		 * We don't count the number of live TIDs during cleanup-only calls to
 		 * btvacuumscan (i.e. when callback is not set).  We count the number
 		 * of index tuples directly instead.  This avoids the expense of
-		 * directly examining all of the tuples on each page.
+		 * directly examining all of the tuples on each page.  VACUUM will
+		 * treat num_index_tuples as an estimate in the cleanup-only case,
+		 * so it doesn't matter that this underestimates num_index_tuples
+		 * significantly in some cases.
 		 */
 		if (minoff > maxoff)
 			attempt_pagedel = (blkno == scanblkno);
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index d524310723..fdbe0da472 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -2105,8 +2105,6 @@ btoptions(Datum reloptions, bool validate)
 {
 	static const relopt_parse_elt tab[] = {
 		{"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
-		{"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
-		offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
 		{"deduplicate_items", RELOPT_TYPE_BOOL,
 		offsetof(BTOptions, deduplicate_items)}
 
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 990f5d0f52..1779b6ba47 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -113,7 +113,7 @@ _bt_restore_meta(XLogReaderState *record, uint8 block_id)
 	/* Cannot log BTREE_MIN_VERSION index metapage without upgrade */
 	Assert(md->btm_version >= BTREE_NOVAC_VERSION);
 	md->btm_last_cleanup_num_delpages = xlrec->last_cleanup_num_delpages;
-	md->btm_last_cleanup_num_heap_tuples = xlrec->last_cleanup_num_heap_tuples;
+	md->btm_last_cleanup_num_heap_tuples = -1.0;
 	md->btm_allequalimage = xlrec->allequalimage;
 
 	pageop = (BTPageOpaque) PageGetSpecialPointer(metapg);
diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c
index f7cc4dd3e6..710efbd36a 100644
--- a/src/backend/access/rmgrdesc/nbtdesc.c
+++ b/src/backend/access/rmgrdesc/nbtdesc.c
@@ -113,9 +113,8 @@ btree_desc(StringInfo buf, XLogReaderState *record)
 
 				xlrec = (xl_btree_metadata *) XLogRecGetBlockData(record, 0,
 																  NULL);
-				appendStringInfo(buf, "last_cleanup_num_delpages %u; last_cleanup_num_heap_tuples: %f",
-								 xlrec->last_cleanup_num_delpages,
-								 xlrec->last_cleanup_num_heap_tuples);
+				appendStringInfo(buf, "last_cleanup_num_delpages %u",
+								 xlrec->last_cleanup_num_delpages);
 				break;
 			}
 	}
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index a5976ad5b1..73e0a672ae 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -148,5 +148,3 @@ int64		VacuumPageDirty = 0;
 
 int			VacuumCostBalance = 0;	/* working state for vacuum */
 bool		VacuumCostActive = false;
-
-double		vacuum_cleanup_index_scale_factor;
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 3fd1a5fbe2..df199851cc 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3694,16 +3694,6 @@ static struct config_real ConfigureNamesReal[] =
 		NULL, NULL, NULL
 	},
 
-	{
-		{"vacuum_cleanup_index_scale_factor", PGC_USERSET, CLIENT_CONN_STATEMENT,
-			gettext_noop("Number of tuple inserts prior to index cleanup as a fraction of reltuples."),
-			NULL
-		},
-		&vacuum_cleanup_index_scale_factor,
-		0.1, 0.0, 1e10,
-		NULL, NULL, NULL
-	},
-
 	{
 		{"log_statement_sample_rate", PGC_SUSET, LOGGING_WHEN,
 			gettext_noop("Fraction of statements exceeding log_min_duration_sample to be logged."),
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index ee06528bb0..3736c972a8 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -671,9 +671,6 @@
 #vacuum_freeze_table_age = 150000000
 #vacuum_multixact_freeze_min_age = 5000000
 #vacuum_multixact_freeze_table_age = 150000000
-#vacuum_cleanup_index_scale_factor = 0.1	# fraction of total number of tuples
-						# before index cleanup, 0 always performs
-						# index cleanup
 #bytea_output = 'hex'			# hex, escape
 #xmlbinary = 'base64'
 #xmloption = 'content'
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 9f0208ac49..ecdb8d752b 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -1789,14 +1789,14 @@ psql_completion(const char *text, int start, int end)
 	/* ALTER INDEX <foo> SET|RESET ( */
 	else if (Matches("ALTER", "INDEX", MatchAny, "RESET", "("))
 		COMPLETE_WITH("fillfactor",
-					  "vacuum_cleanup_index_scale_factor", "deduplicate_items", /* BTREE */
+					  "deduplicate_items", /* BTREE */
 					  "fastupdate", "gin_pending_list_limit",	/* GIN */
 					  "buffering",	/* GiST */
 					  "pages_per_range", "autosummarize"	/* BRIN */
 			);
 	else if (Matches("ALTER", "INDEX", MatchAny, "SET", "("))
 		COMPLETE_WITH("fillfactor =",
-					  "vacuum_cleanup_index_scale_factor =", "deduplicate_items =", /* BTREE */
+					  "deduplicate_items =", /* BTREE */
 					  "fastupdate =", "gin_pending_list_limit =",	/* GIN */
 					  "buffering =",	/* GiST */
 					  "pages_per_range =", "autosummarize ="	/* BRIN */
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 967de73596..81e597a80b 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -8523,46 +8523,6 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
       </listitem>
      </varlistentry>
 
-     <varlistentry id="guc-vacuum-cleanup-index-scale-factor" xreflabel="vacuum_cleanup_index_scale_factor">
-      <term><varname>vacuum_cleanup_index_scale_factor</varname> (<type>floating point</type>)
-      <indexterm>
-       <primary><varname>vacuum_cleanup_index_scale_factor</varname></primary>
-       <secondary>configuration parameter</secondary>
-      </indexterm>
-      </term>
-      <listitem>
-       <para>
-        Specifies the fraction of the total number of heap tuples counted in
-        the previous statistics collection that can be inserted without
-        incurring an index scan at the <command>VACUUM</command> cleanup stage.
-        This setting currently applies to B-tree indexes only.
-       </para>
-
-       <para>
-        If no tuples were deleted from the heap, B-tree indexes are still
-        scanned at the <command>VACUUM</command> cleanup stage when the
-        index's statistics are stale.  Index statistics are considered
-        stale if the number of newly inserted tuples exceeds the
-        <varname>vacuum_cleanup_index_scale_factor</varname>
-        fraction of the total number of heap tuples detected by the previous
-        statistics collection. The total number of heap tuples is stored in
-        the index meta-page. Note that the meta-page does not include this data
-        until <command>VACUUM</command> finds no dead tuples, so B-tree index
-        scan at the cleanup stage can only be skipped if the second and
-        subsequent <command>VACUUM</command> cycles detect no dead tuples.
-       </para>
-
-       <para>
-        The value can range from <literal>0</literal> to
-        <literal>10000000000</literal>.
-        When <varname>vacuum_cleanup_index_scale_factor</varname> is set to
-        <literal>0</literal>, index scans are never skipped during
-        <command>VACUUM</command> cleanup. The default value is <literal>0.1</literal>.
-       </para>
-
-      </listitem>
-     </varlistentry>
-
      <varlistentry id="guc-bytea-output" xreflabel="bytea_output">
       <term><varname>bytea_output</varname> (<type>enum</type>)
       <indexterm>
diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml
index 51b4d57939..bc57adf7d5 100644
--- a/doc/src/sgml/ref/create_index.sgml
+++ b/doc/src/sgml/ref/create_index.sgml
@@ -456,20 +456,6 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] <replaceable class=
     </note>
     </listitem>
    </varlistentry>
-
-   <varlistentry id="index-reloption-vacuum-cleanup-index-scale-factor" xreflabel="vacuum_cleanup_index_scale_factor">
-    <term><literal>vacuum_cleanup_index_scale_factor</literal> (<type>floating point</type>)
-     <indexterm>
-      <primary><varname>vacuum_cleanup_index_scale_factor</varname></primary>
-      <secondary>storage parameter</secondary>
-     </indexterm>
-    </term>
-    <listitem>
-    <para>
-      Per-index value for <xref linkend="guc-vacuum-cleanup-index-scale-factor"/>.
-    </para>
-    </listitem>
-   </varlistentry>
    </variablelist>
 
    <para>
diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out
index cfd4338e36..bc113a70b4 100644
--- a/src/test/regress/expected/btree_index.out
+++ b/src/test/regress/expected/btree_index.out
@@ -308,35 +308,6 @@ alter table btree_tall_tbl alter COLUMN t set storage plain;
 create index btree_tall_idx on btree_tall_tbl (t, id) with (fillfactor = 10);
 insert into btree_tall_tbl select g, repeat('x', 250)
 from generate_series(1, 130) g;
---
--- Test vacuum_cleanup_index_scale_factor
---
--- Simple create
-create table btree_test(a int);
-create index btree_idx1 on btree_test(a) with (vacuum_cleanup_index_scale_factor = 40.0);
-select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass;
-                reloptions                
-------------------------------------------
- {vacuum_cleanup_index_scale_factor=40.0}
-(1 row)
-
--- Fail while setting improper values
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = -10.0);
-ERROR:  value -10.0 out of bounds for option "vacuum_cleanup_index_scale_factor"
-DETAIL:  Valid values are between "0.000000" and "10000000000.000000".
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 100.0);
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 'string');
-ERROR:  invalid value for floating point option "vacuum_cleanup_index_scale_factor": string
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = true);
-ERROR:  invalid value for floating point option "vacuum_cleanup_index_scale_factor": true
--- Simple ALTER INDEX
-alter index btree_idx1 set (vacuum_cleanup_index_scale_factor = 70.0);
-select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass;
-                reloptions                
-------------------------------------------
- {vacuum_cleanup_index_scale_factor=70.0}
-(1 row)
-
 --
 -- Test for multilevel page deletion
 --
diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql
index 96f53818ff..c60312db2d 100644
--- a/src/test/regress/sql/btree_index.sql
+++ b/src/test/regress/sql/btree_index.sql
@@ -150,25 +150,6 @@ create index btree_tall_idx on btree_tall_tbl (t, id) with (fillfactor = 10);
 insert into btree_tall_tbl select g, repeat('x', 250)
 from generate_series(1, 130) g;
 
---
--- Test vacuum_cleanup_index_scale_factor
---
-
--- Simple create
-create table btree_test(a int);
-create index btree_idx1 on btree_test(a) with (vacuum_cleanup_index_scale_factor = 40.0);
-select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass;
-
--- Fail while setting improper values
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = -10.0);
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 100.0);
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = 'string');
-create index btree_idx_err on btree_test(a) with (vacuum_cleanup_index_scale_factor = true);
-
--- Simple ALTER INDEX
-alter index btree_idx1 set (vacuum_cleanup_index_scale_factor = 70.0);
-select reloptions from pg_class WHERE oid = 'btree_idx1'::regclass;
-
 --
 -- Test for multilevel page deletion
 --

base-commit: 8a812e5106c5db50039336288d376a188844e2cc
-- 
2.27.0

