From 8171d5a4b7e6a483f4213a0804cd52d429874f13 Mon Sep 17 00:00:00 2001
From: Masahiko Sawada <sawada.mshk@gmail.com>
Date: Mon, 4 Jan 2021 13:34:10 +0900
Subject: [PATCH v3 1/3] Choose vacuum strategy before heap and index vacuums.

If the index_cleanup option is specified in neither the VACUUM command
nor the storage options, lazy vacuum asks each index for its vacuum
strategy before heap vacuum and decides whether or not to remove the
collected garbage tuples from the heap, based on both the answers of
amvacuumstrategy, a new index AM API introduced in this commit, and how
many LP_DEAD items can be accumulated in the space of a heap page left
free by fillfactor.

The decision made by lazy vacuum and the answer returned from
amvacuumstrategy are passed to ambulkdelete. Then each index can
choose whether or not to skip index bulk-deletion accordingly.
---
 contrib/bloom/bloom.h                         |   2 +
 contrib/bloom/blutils.c                       |   1 +
 contrib/bloom/blvacuum.c                      |  23 +-
 doc/src/sgml/indexam.sgml                     |  25 ++
 doc/src/sgml/ref/create_table.sgml            |  19 +-
 src/backend/access/brin/brin.c                |   8 +-
 src/backend/access/common/reloptions.c        |  40 +-
 src/backend/access/gin/ginpostinglist.c       |  30 +-
 src/backend/access/gin/ginutil.c              |   1 +
 src/backend/access/gin/ginvacuum.c            |  25 ++
 src/backend/access/gist/gist.c                |   1 +
 src/backend/access/gist/gistvacuum.c          |  28 +-
 src/backend/access/hash/hash.c                |  22 +
 src/backend/access/heap/vacuumlazy.c          | 376 ++++++++++++++----
 src/backend/access/index/indexam.c            |  22 +
 src/backend/access/nbtree/nbtree.c            |  34 ++
 src/backend/access/spgist/spgutils.c          |   1 +
 src/backend/access/spgist/spgvacuum.c         |  27 +-
 src/backend/catalog/index.c                   |   2 +
 src/backend/commands/analyze.c                |   1 +
 src/backend/commands/vacuum.c                 |  23 +-
 src/include/access/amapi.h                    |   7 +-
 src/include/access/genam.h                    |  36 +-
 src/include/access/gin_private.h              |   2 +
 src/include/access/gist_private.h             |   2 +
 src/include/access/hash.h                     |   2 +
 src/include/access/htup_details.h             |  21 +-
 src/include/access/nbtree.h                   |   2 +
 src/include/access/spgist.h                   |   2 +
 src/include/commands/vacuum.h                 |  20 +-
 src/include/utils/rel.h                       |  17 +-
 .../expected/test_ginpostinglist.out          |   6 +-
 32 files changed, 672 insertions(+), 156 deletions(-)
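
For reviewers, here is a condensed restatement of the decision flow the
commit message describes. It is illustrative only: the real logic lives in
choose_vacuum_strategy() in vacuumlazy.c below, and the function name
decide_heap_vacuum_sketch and its flattened arguments are invented for this
sketch.

#include <stdbool.h>

/* Minimal stand-ins for the enums the patch uses */
typedef enum
{
	VACOPT_TERNARY_DEFAULT,
	VACOPT_TERNARY_DISABLED,
	VACOPT_TERNARY_ENABLED
} VacOptTernaryValue;

typedef enum
{
	INDEX_VACUUM_STRATEGY_NONE,
	INDEX_VACUUM_STRATEGY_BULKDELETE
} IndexVacuumStrategy;

/* Returns true if the collected garbage should be removed from the heap */
static bool
decide_heap_vacuum_sketch(VacOptTernaryValue index_cleanup,
						  IndexVacuumStrategy *answers, int nindexes,
						  int max_lp_dead_on_page, int lp_dead_limit)
{
	bool		vacuum_heap = true;

	/* if any index answers "none", tentatively skip heap vacuum */
	for (int i = 0; i < nindexes; i++)
	{
		if (answers[i] == INDEX_VACUUM_STRATEGY_NONE)
			vacuum_heap = false;
	}

	/* an explicit INDEX_CLEANUP setting overrides the indexes' answers */
	if (index_cleanup == VACOPT_TERNARY_ENABLED)
		return true;
	if (index_cleanup == VACOPT_TERNARY_DISABLED)
		return false;

	/* otherwise, force heap vacuum once LP_DEAD items risk overflowing a page */
	if (!vacuum_heap && max_lp_dead_on_page > lp_dead_limit)
		vacuum_heap = true;

	return vacuum_heap;
}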

diff --git a/contrib/bloom/bloom.h b/contrib/bloom/bloom.h
index a22a6dfa40..8395d31450 100644
--- a/contrib/bloom/bloom.h
+++ b/contrib/bloom/bloom.h
@@ -202,6 +202,8 @@ extern void blendscan(IndexScanDesc scan);
 extern IndexBuildResult *blbuild(Relation heap, Relation index,
 								 struct IndexInfo *indexInfo);
 extern void blbuildempty(Relation index);
+extern IndexVacuumStrategy blvacuumstrategy(IndexVacuumInfo *info,
+											struct VacuumParams *params);
 extern IndexBulkDeleteResult *blbulkdelete(IndexVacuumInfo *info,
 										   IndexBulkDeleteResult *stats, IndexBulkDeleteCallback callback,
 										   void *callback_state);
diff --git a/contrib/bloom/blutils.c b/contrib/bloom/blutils.c
index 1e505b1da5..8098d75c82 100644
--- a/contrib/bloom/blutils.c
+++ b/contrib/bloom/blutils.c
@@ -131,6 +131,7 @@ blhandler(PG_FUNCTION_ARGS)
 	amroutine->ambuild = blbuild;
 	amroutine->ambuildempty = blbuildempty;
 	amroutine->aminsert = blinsert;
+	amroutine->amvacuumstrategy = blvacuumstrategy;
 	amroutine->ambulkdelete = blbulkdelete;
 	amroutine->amvacuumcleanup = blvacuumcleanup;
 	amroutine->amcanreturn = NULL;
diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c
index 88b0a6d290..c356ec9e85 100644
--- a/contrib/bloom/blvacuum.c
+++ b/contrib/bloom/blvacuum.c
@@ -23,6 +23,19 @@
 #include "storage/lmgr.h"
 
 
+/*
+ * Choose the vacuum strategy. Do bulk-deletion unless index cleanup
+ * is set to off.
+ */
+IndexVacuumStrategy
+blvacuumstrategy(IndexVacuumInfo *info, VacuumParams *params)
+{
+	if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
+		return INDEX_VACUUM_STRATEGY_NONE;
+	else
+		return INDEX_VACUUM_STRATEGY_BULKDELETE;
+}
+
 /*
  * Bulk deletion of all index entries pointing to a set of heap tuples.
  * The set of target tuples is specified via a callback routine that tells
@@ -45,6 +58,14 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	BloomMetaPageData *metaData;
 	GenericXLogState *gxlogState;
 
+	/*
+	 * Skip deleting index entries if the corresponding heap tuples will
+	 * not be deleted and this index opted to skip bulk-deletion.
+	 */
+	if (!info->will_vacuum_heap &&
+		info->indvac_strategy == INDEX_VACUUM_STRATEGY_NONE)
+		return stats;
+
 	if (stats == NULL)
 		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
 
@@ -172,7 +193,7 @@ blvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 	BlockNumber npages,
 				blkno;
 
-	if (info->analyze_only)
+	if (info->analyze_only || !info->vacuumcleanup_requested)
 		return stats;
 
 	if (stats == NULL)
diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml
index ec5741df6d..9f881303f6 100644
--- a/doc/src/sgml/indexam.sgml
+++ b/doc/src/sgml/indexam.sgml
@@ -135,6 +135,7 @@ typedef struct IndexAmRoutine
     ambuild_function ambuild;
     ambuildempty_function ambuildempty;
     aminsert_function aminsert;
+    amvacuumstrategy_function amvacuumstrategy;
     ambulkdelete_function ambulkdelete;
     amvacuumcleanup_function amvacuumcleanup;
     amcanreturn_function amcanreturn;   /* can be NULL */
@@ -346,6 +347,30 @@ aminsert (Relation indexRelation,
 
   <para>
 <programlisting>
+IndexVacuumStrategy
+amvacuumstrategy (IndexVacuumInfo *info);
+</programlisting>
+   Tell <command>VACUUM</command> whether or not the index is willing to
+   delete index tuples.  This callback is called before
+   <function>ambulkdelete</function>.  Possible return values are
+   <literal>INDEX_VACUUM_STRATEGY_NONE</literal> and
+   <literal>INDEX_VACUUM_STRATEGY_BULKDELETE</literal>.  From the index's
+   point of view, if the index doesn't need to delete index tuples, it
+   must return <literal>INDEX_VACUUM_STRATEGY_NONE</literal>.  The returned
+   value can be referred to in <function>ambulkdelete</function> by checking
+   <literal>info-&gt;indvac_strategy</literal>.
+  </para>
+  <para>
+   <command>VACUUM</command> decides whether or not to delete garbage tuples
+   from the heap based on the values returned from each index and several
+   other factors.  Note that if the index refers to heap TIDs and
+   <command>VACUUM</command> decides to delete garbage tuples from the heap,
+   the index must also delete its index tuples, even if it returned
+   <literal>INDEX_VACUUM_STRATEGY_NONE</literal>.
+  </para>
+
+  <para>
+<programlisting>
 IndexBulkDeleteResult *
 ambulkdelete (IndexVacuumInfo *info,
               IndexBulkDeleteResult *stats,
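
  As an illustration of this API, an access method could implement a policy
  beyond the all-or-nothing one the in-tree AMs adopt in this patch.  The
  following hypothetical callback (the name myamvacuumstrategy and the
  helper my_index_is_small() are invented for this sketch) skips
  bulk-deletion for a small index:

IndexVacuumStrategy
myamvacuumstrategy(IndexVacuumInfo *info, struct VacuumParams *params)
{
	/* honor an explicit INDEX_CLEANUP off, as the in-tree AMs do */
	if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
		return INDEX_VACUUM_STRATEGY_NONE;

	/*
	 * Hypothetical policy: a small index can tolerate retaining dead
	 * tuples until a future vacuum cycle, so ask lazy vacuum to skip
	 * index bulk-deletion for now.
	 */
	if (my_index_is_small(info->index))
		return INDEX_VACUUM_STRATEGY_NONE;

	return INDEX_VACUUM_STRATEGY_BULKDELETE;
}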
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 569f4c9da7..c45cdcb292 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -1434,20 +1434,23 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
    </varlistentry>
 
    <varlistentry id="reloption-vacuum-index-cleanup" xreflabel="vacuum_index_cleanup">
-    <term><literal>vacuum_index_cleanup</literal>, <literal>toast.vacuum_index_cleanup</literal> (<type>boolean</type>)
+    <term><literal>vacuum_index_cleanup</literal>, <literal>toast.vacuum_index_cleanup</literal> (<type>enum</type>)
     <indexterm>
      <primary><varname>vacuum_index_cleanup</varname> storage parameter</primary>
     </indexterm>
     </term>
     <listitem>
      <para>
-      Enables or disables index cleanup when <command>VACUUM</command> is
-      run on this table.  The default value is <literal>true</literal>.
-      Disabling index cleanup can speed up <command>VACUUM</command> very
-      significantly, but may also lead to severely bloated indexes if table
-      modifications are frequent.  The <literal>INDEX_CLEANUP</literal>
-      parameter of <link linkend="sql-vacuum"><command>VACUUM</command></link>, if specified, overrides
-      the value of this option.
+      Specifies the index cleanup behavior when <command>VACUUM</command> is
+      run on this table.  The default value is <literal>auto</literal>, which
+      decides whether to enable or disable index cleanup based on the state
+      of the indexes and the heap.  With <literal>off</literal> index cleanup
+      is disabled; with <literal>on</literal> it is enabled.  Disabling index
+      cleanup can speed up
+      <command>VACUUM</command> very significantly, but may also lead to severely
+      bloated indexes if table modifications are frequent.  The
+      <literal>INDEX_CLEANUP</literal> parameter of
+      <link linkend="sql-vacuum"><command>VACUUM</command></link>, if specified,
+      overrides the value of this option.
      </para>
     </listitem>
    </varlistentry>
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 27ba596c6e..fb70234112 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -112,6 +112,7 @@ brinhandler(PG_FUNCTION_ARGS)
 	amroutine->ambuild = brinbuild;
 	amroutine->ambuildempty = brinbuildempty;
 	amroutine->aminsert = brininsert;
+	amroutine->amvacuumstrategy = NULL;
 	amroutine->ambulkdelete = brinbulkdelete;
 	amroutine->amvacuumcleanup = brinvacuumcleanup;
 	amroutine->amcanreturn = NULL;
@@ -800,8 +801,11 @@ brinvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 {
 	Relation	heapRel;
 
-	/* No-op in ANALYZE ONLY mode */
-	if (info->analyze_only)
+	/*
+	 * No-op in ANALYZE ONLY mode or when the user requests disabling
+	 * index cleanup.
+	 */
+	if (info->analyze_only || !info->vacuumcleanup_requested)
 		return stats;
 
 	if (!stats)
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index c687d3ee9e..692455d617 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -27,6 +27,7 @@
 #include "catalog/pg_type.h"
 #include "commands/defrem.h"
 #include "commands/tablespace.h"
+#include "commands/vacuum.h"
 #include "commands/view.h"
 #include "nodes/makefuncs.h"
 #include "postmaster/postmaster.h"
@@ -140,15 +141,6 @@ static relopt_bool boolRelOpts[] =
 		},
 		false
 	},
-	{
-		{
-			"vacuum_index_cleanup",
-			"Enables index vacuuming and index cleanup",
-			RELOPT_KIND_HEAP | RELOPT_KIND_TOAST,
-			ShareUpdateExclusiveLock
-		},
-		true
-	},
 	{
 		{
 			"vacuum_truncate",
@@ -492,6 +484,23 @@ relopt_enum_elt_def viewCheckOptValues[] =
 	{(const char *) NULL}		/* list terminator */
 };
 
+/*
+ * Values from VacOptTernaryValue for the index_cleanup option.
+ * Boolean values other than "on" and "off" are allowed for backward
+ * compatibility, as the option used to be a boolean.
+ */
+relopt_enum_elt_def vacOptTernaryOptValues[] =
+{
+	{"auto", VACOPT_TERNARY_DEFAULT},
+	{"true", VACOPT_TERNARY_ENABLED},
+	{"false", VACOPT_TERNARY_DISABLED},
+	{"on", VACOPT_TERNARY_ENABLED},
+	{"off", VACOPT_TERNARY_DISABLED},
+	{"1", VACOPT_TERNARY_ENABLED},
+	{"0", VACOPT_TERNARY_DISABLED}
+};
+
 static relopt_enum enumRelOpts[] =
 {
 	{
@@ -516,6 +525,17 @@ static relopt_enum enumRelOpts[] =
 		VIEW_OPTION_CHECK_OPTION_NOT_SET,
 		gettext_noop("Valid values are \"local\" and \"cascaded\".")
 	},
+	{
+		{
+			"vacuum_index_cleanup",
+			"Enables index vacuuming and index cleanup",
+			RELOPT_KIND_HEAP | RELOPT_KIND_TOAST,
+			ShareUpdateExclusiveLock
+		},
+		vacOptTernaryOptValues,
+		VACOPT_TERNARY_DEFAULT,
+		gettext_noop("Valid values are \"on\", \"off\", and \"auto\".")
+	},
 	/* list terminator */
 	{{NULL}}
 };
@@ -1856,7 +1876,7 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind)
 		offsetof(StdRdOptions, user_catalog_table)},
 		{"parallel_workers", RELOPT_TYPE_INT,
 		offsetof(StdRdOptions, parallel_workers)},
-		{"vacuum_index_cleanup", RELOPT_TYPE_BOOL,
+		{"vacuum_index_cleanup", RELOPT_TYPE_ENUM,
 		offsetof(StdRdOptions, vacuum_index_cleanup)},
 		{"vacuum_truncate", RELOPT_TYPE_BOOL,
 		offsetof(StdRdOptions, vacuum_truncate)}
diff --git a/src/backend/access/gin/ginpostinglist.c b/src/backend/access/gin/ginpostinglist.c
index 216b2b9a2c..0322a1736e 100644
--- a/src/backend/access/gin/ginpostinglist.c
+++ b/src/backend/access/gin/ginpostinglist.c
@@ -22,29 +22,29 @@
 
 /*
  * For encoding purposes, item pointers are represented as 64-bit unsigned
- * integers. The lowest 11 bits represent the offset number, and the next
- * lowest 32 bits are the block number. That leaves 21 bits unused, i.e.
- * only 43 low bits are used.
+ * integers. The lowest 12 bits represent the offset number, and the next
+ * lowest 32 bits are the block number. That leaves 20 bits unused, i.e.
+ * only 44 low bits are used.
  *
- * 11 bits is enough for the offset number, because MaxHeapTuplesPerPage <
- * 2^11 on all supported block sizes. We are frugal with the bits, because
+ * 12 bits is enough for the offset number, because MaxHeapTuplesPerPage <
+ * 2^12 on all supported block sizes. We are frugal with the bits, because
  * smaller integers use fewer bytes in the varbyte encoding, saving disk
  * space. (If we get a new table AM in the future that wants to use the full
  * range of possible offset numbers, we'll need to change this.)
  *
- * These 43-bit integers are encoded using varbyte encoding. In each byte,
+ * These 44-bit integers are encoded using varbyte encoding. In each byte,
  * the 7 low bits contain data, while the highest bit is a continuation bit.
  * When the continuation bit is set, the next byte is part of the same
- * integer, otherwise this is the last byte of this integer. 43 bits need
+ * integer, otherwise this is the last byte of this integer. 44 bits need
  * at most 7 bytes in this encoding:
  *
  * 0XXXXXXX
- * 1XXXXXXX 0XXXXYYY
- * 1XXXXXXX 1XXXXYYY 0YYYYYYY
- * 1XXXXXXX 1XXXXYYY 1YYYYYYY 0YYYYYYY
- * 1XXXXXXX 1XXXXYYY 1YYYYYYY 1YYYYYYY 0YYYYYYY
- * 1XXXXXXX 1XXXXYYY 1YYYYYYY 1YYYYYYY 1YYYYYYY 0YYYYYYY
- * 1XXXXXXX 1XXXXYYY 1YYYYYYY 1YYYYYYY 1YYYYYYY 1YYYYYYY 0uuuuuuY
+ * 1XXXXXXX 0XXXXXYY
+ * 1XXXXXXX 1XXXXXYY 0YYYYYYY
+ * 1XXXXXXX 1XXXXXYY 1YYYYYYY 0YYYYYYY
+ * 1XXXXXXX 1XXXXXYY 1YYYYYYY 1YYYYYYY 0YYYYYYY
+ * 1XXXXXXX 1XXXXXYY 1YYYYYYY 1YYYYYYY 1YYYYYYY 0YYYYYYY
+ * 1XXXXXXX 1XXXXXYY 1YYYYYYY 1YYYYYYY 1YYYYYYY 1YYYYYYY 0uuuuuYY
  *
  * X = bits used for offset number
  * Y = bits used for block number
@@ -73,12 +73,12 @@
 
 /*
  * How many bits do you need to encode offset number? OffsetNumber is a 16-bit
- * integer, but you can't fit that many items on a page. 11 ought to be more
+ * integer, but you can't fit that many items on a page. 12 ought to be more
  * than enough. It's tempting to derive this from MaxHeapTuplesPerPage, and
  * use the minimum number of bits, but that would require changing the on-disk
  * format if MaxHeapTuplesPerPage changes. Better to leave some slack.
  */
-#define MaxHeapTuplesPerPageBits		11
+#define MaxHeapTuplesPerPageBits		12
 
 /* Max. number of bytes needed to encode the largest supported integer. */
 #define MaxBytesPerInteger				7
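
As an aside on the varbyte scheme the comment above describes, here is a
minimal standalone encoder sketch.  It is illustrative only; the in-file
implementation is encode_varbyte(), which differs in details such as
writing through a moving pointer.

#include <stdint.h>

/*
 * Encode "val" using the scheme described above: 7 data bits per byte,
 * with the high bit set on every byte except the last.  A 44-bit value
 * needs at most ceil(44 / 7) = 7 bytes, matching MaxBytesPerInteger.
 * Returns the number of bytes written to buf.
 */
static int
encode_varbyte_sketch(uint64_t val, unsigned char *buf)
{
	int			len = 0;

	do
	{
		unsigned char b = val & 0x7F;	/* low 7 data bits */

		val >>= 7;
		if (val != 0)
			b |= 0x80;			/* continuation bit: more bytes follow */
		buf[len++] = b;
	} while (val != 0);

	return len;
}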
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 6b9b04cf42..fc375332fc 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -63,6 +63,7 @@ ginhandler(PG_FUNCTION_ARGS)
 	amroutine->ambuild = ginbuild;
 	amroutine->ambuildempty = ginbuildempty;
 	amroutine->aminsert = gininsert;
+	amroutine->amvacuumstrategy = ginvacuumstrategy;
 	amroutine->ambulkdelete = ginbulkdelete;
 	amroutine->amvacuumcleanup = ginvacuumcleanup;
 	amroutine->amcanreturn = NULL;
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 35b85a9bff..68bec5238a 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -560,6 +560,19 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3
 	return (tmppage == origpage) ? NULL : tmppage;
 }
 
+/*
+ * Choose the vacuum strategy. Do bulk-deletion unless index cleanup
+ * is set to off.
+ */
+IndexVacuumStrategy
+ginvacuumstrategy(IndexVacuumInfo *info, VacuumParams *params)
+{
+	if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
+		return INDEX_VACUUM_STRATEGY_NONE;
+	else
+		return INDEX_VACUUM_STRATEGY_BULKDELETE;
+}
+
 IndexBulkDeleteResult *
 ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 			  IndexBulkDeleteCallback callback, void *callback_state)
@@ -571,6 +584,14 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))];
 	uint32		nRoot;
 
+	/*
+	 * Skip deleting index entries if the corresponding heap tuples will
+	 * not be deleted and this index opted to skip bulk-deletion.
+	 */
+	if (!info->will_vacuum_heap &&
+		info->indvac_strategy == INDEX_VACUUM_STRATEGY_NONE)
+		return stats;
+
 	gvs.tmpCxt = AllocSetContextCreate(CurrentMemoryContext,
 									   "Gin vacuum temporary context",
 									   ALLOCSET_DEFAULT_SIZES);
@@ -708,6 +729,10 @@ ginvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 		return stats;
 	}
 
+	/* Skip index cleanup if the user requested disabling it */
+	if (!info->vacuumcleanup_requested)
+		return stats;
+
 	/*
 	 * Set up all-zero stats and cleanup pending inserts if ginbulkdelete
 	 * wasn't called
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index f203bb594c..cddcdd83be 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -84,6 +84,7 @@ gisthandler(PG_FUNCTION_ARGS)
 	amroutine->ambuild = gistbuild;
 	amroutine->ambuildempty = gistbuildempty;
 	amroutine->aminsert = gistinsert;
+	amroutine->amvacuumstrategy = gistvacuumstrategy;
 	amroutine->ambulkdelete = gistbulkdelete;
 	amroutine->amvacuumcleanup = gistvacuumcleanup;
 	amroutine->amcanreturn = gistcanreturn;
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 94a7e12763..706454b2f0 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -52,6 +52,19 @@ static bool gistdeletepage(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 						   Buffer buffer, OffsetNumber downlink,
 						   Buffer leafBuffer);
 
+/*
+ * Choose the vacuum strategy. Do bulk-deletion unless index cleanup
+ * is set to off.
+ */
+IndexVacuumStrategy
+gistvacuumstrategy(IndexVacuumInfo *info, VacuumParams *params)
+{
+	if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
+		return INDEX_VACUUM_STRATEGY_NONE;
+	else
+		return INDEX_VACUUM_STRATEGY_BULKDELETE;
+}
+
 /*
  * VACUUM bulkdelete stage: remove index entries.
  */
@@ -59,6 +72,14 @@ IndexBulkDeleteResult *
 gistbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 			   IndexBulkDeleteCallback callback, void *callback_state)
 {
+	/*
+	 * Skip deleting index entries if the corresponding heap tuples will
+	 * not be deleted and this index opted to skip bulk-deletion.
+	 */
+	if (!info->will_vacuum_heap &&
+		info->indvac_strategy == INDEX_VACUUM_STRATEGY_NONE)
+		return stats;
+
 	/* allocate stats if first time through, else re-use existing struct */
 	if (stats == NULL)
 		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
@@ -74,8 +95,11 @@ gistbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 IndexBulkDeleteResult *
 gistvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 {
-	/* No-op in ANALYZE ONLY mode */
-	if (info->analyze_only)
+	/*
+	 * No-op in ANALYZE ONLY mode or when the user requests disabling
+	 * index cleanup.
+	 */
+	if (info->analyze_only || !info->vacuumcleanup_requested)
 		return stats;
 
 	/*
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 0752fb38a9..0449638cb3 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -81,6 +81,7 @@ hashhandler(PG_FUNCTION_ARGS)
 	amroutine->ambuild = hashbuild;
 	amroutine->ambuildempty = hashbuildempty;
 	amroutine->aminsert = hashinsert;
+	amroutine->amvacuumstrategy = hashvacuumstrategy;
 	amroutine->ambulkdelete = hashbulkdelete;
 	amroutine->amvacuumcleanup = hashvacuumcleanup;
 	amroutine->amcanreturn = NULL;
@@ -444,6 +445,19 @@ hashendscan(IndexScanDesc scan)
 	scan->opaque = NULL;
 }
 
+/*
+ * Choose the vacuum strategy. Do bulk-deletion unless index cleanup
+ * is set to off.
+ */
+IndexVacuumStrategy
+hashvacuumstrategy(IndexVacuumInfo *info, VacuumParams *params)
+{
+	if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
+		return INDEX_VACUUM_STRATEGY_NONE;
+	else
+		return INDEX_VACUUM_STRATEGY_BULKDELETE;
+}
+
 /*
  * Bulk deletion of all index entries pointing to a set of heap tuples.
  * The set of target tuples is specified via a callback routine that tells
@@ -469,6 +483,14 @@ hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	HashMetaPage metap;
 	HashMetaPage cachedmetap;
 
+	/*
+	 * Skip deleting index entries if the corresponding heap tuples will
+	 * not be deleted and this index opted to skip bulk-deletion.
+	 */
+	if (!info->will_vacuum_heap &&
+		info->indvac_strategy == INDEX_VACUUM_STRATEGY_NONE)
+		return stats;
+
 	tuples_removed = 0;
 	num_index_tuples = 0;
 
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index f3d2265fad..b99b7e51f4 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -130,6 +130,15 @@
  */
 #define PREFETCH_SIZE			((BlockNumber) 32)
 
+/*
+ * Safety ratio of how many LP_DEAD items can be stored in a single heap
+ * page before it starts to overflow.  We're trying to avoid having VACUUM
+ * call lazy_vacuum_heap() in most cases, but we don't want to be too
+ * aggressive: it would be risky to make the value we test for much higher,
+ * since it might be too late by the time we actually call lazy_vacuum_heap().
+ */
+#define DEAD_ITEMS_ON_PAGE_LIMIT_SAFETY_RATIO	0.7
+
 /*
  * DSM keys for parallel vacuum.  Unlike other parallel execution code, since
  * we don't need to worry about DSM keys conflicting with plan_node_id we can
@@ -140,6 +149,7 @@
 #define PARALLEL_VACUUM_KEY_QUERY_TEXT		3
 #define PARALLEL_VACUUM_KEY_BUFFER_USAGE	4
 #define PARALLEL_VACUUM_KEY_WAL_USAGE		5
+#define PARALLEL_VACUUM_KEY_IND_STRATEGY	6
 
 /*
  * Macro to check if we are in a parallel vacuum.  If true, we are in the
@@ -214,6 +224,18 @@ typedef struct LVShared
 	double		reltuples;
 	bool		estimated_count;
 
+	/*
+	 * Copy of LVRelStats.vacuum_heap. It tells the index AM whether lazy
+	 * vacuum will remove dead tuples from the heap after index vacuum.
+	 */
+	bool		vacuum_heap;
+
+	/*
+	 * Copy of LVRelStats.indexcleanup_requested. It tells the index AM
+	 * whether amvacuumcleanup is requested.
+	 */
+	bool		indexcleanup_requested;
+
 	/*
 	 * In single process lazy vacuum we could consume more memory during index
 	 * vacuuming or cleanup apart from the memory for heap scanning.  In
@@ -293,8 +315,8 @@ typedef struct LVRelStats
 {
 	char	   *relnamespace;
 	char	   *relname;
-	/* useindex = true means two-pass strategy; false means one-pass */
-	bool		useindex;
+	/* hasindex = true means two-pass strategy; false means one-pass */
+	bool		hasindex;
 	/* Overall statistics about rel */
 	BlockNumber old_rel_pages;	/* previous value of pg_class.relpages */
 	BlockNumber rel_pages;		/* total number of pages */
@@ -313,6 +335,15 @@ typedef struct LVRelStats
 	int			num_index_scans;
 	TransactionId latestRemovedXid;
 	bool		lock_waiter_detected;
+	bool		vacuum_heap;	/* do we remove dead tuples from the heap? */
+	bool		indexcleanup_requested; /* true unless INDEX_CLEANUP is off */
+
+	/*
+	 * The array of index vacuum strategies, one per index, returned from
+	 * amvacuumstrategy. This is allocated in the DSM segment in parallel
+	 * mode and in local memory in non-parallel mode.
+	 */
+	IndexVacuumStrategy *ivstrategies;
 
 	/* Used for error callback */
 	char	   *indname;
@@ -320,6 +351,8 @@ typedef struct LVRelStats
 	OffsetNumber offnum;		/* used only for heap operations */
 	VacErrPhase phase;
 } LVRelStats;
+#define SizeOfIndVacStrategies(nindexes) \
+	(mul_size(sizeof(IndexVacuumStrategy), (nindexes)))
 
 /* Struct for saving and restoring vacuum error information. */
 typedef struct LVSavedErrInfo
@@ -343,6 +376,13 @@ static BufferAccessStrategy vac_strategy;
 static void lazy_scan_heap(Relation onerel, VacuumParams *params,
 						   LVRelStats *vacrelstats, Relation *Irel, int nindexes,
 						   bool aggressive);
+static void choose_vacuum_strategy(Relation onerel, LVRelStats *vacrelstats,
+								   VacuumParams *params, Relation *Irel,
+								   int nindexes, int ndeaditems);
+static void lazy_vacuum_table_and_indexes(Relation onerel, VacuumParams *params,
+										  LVRelStats *vacrelstats, Relation *Irel,
+										  int nindexes, IndexBulkDeleteResult **stats,
+										  LVParallelState *lps, int *maxdeadtups);
 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup,
 									LVRelStats *vacrelstats);
@@ -351,7 +391,8 @@ static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 									LVRelStats *vacrelstats, LVParallelState *lps,
 									int nindexes);
 static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-							  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats);
+							  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats,
+							  IndexVacuumStrategy ivstrat);
 static void lazy_cleanup_index(Relation indrel,
 							   IndexBulkDeleteResult **stats,
 							   double reltuples, bool estimated_count, LVRelStats *vacrelstats);
@@ -362,7 +403,8 @@ static bool should_attempt_truncation(VacuumParams *params,
 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
 static BlockNumber count_nondeletable_pages(Relation onerel,
 											LVRelStats *vacrelstats);
-static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
+static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks,
+							 int nindexes);
 static void lazy_record_dead_tuple(LVDeadTuples *dead_tuples,
 								   ItemPointer itemptr);
 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
@@ -381,7 +423,8 @@ static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 								  int nindexes);
 static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 							 LVShared *lvshared, LVSharedIndStats *shared_indstats,
-							 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats);
+							 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats,
+							 IndexVacuumStrategy ivstrat);
 static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 									 LVRelStats *vacrelstats, LVParallelState *lps,
 									 int nindexes);
@@ -442,7 +485,6 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	ErrorContextCallback errcallback;
 
 	Assert(params != NULL);
-	Assert(params->index_cleanup != VACOPT_TERNARY_DEFAULT);
 	Assert(params->truncate != VACOPT_TERNARY_DEFAULT);
 
 	/* not every AM requires these to be valid, but heap does */
@@ -501,8 +543,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 
 	/* Open all indexes of the relation */
 	vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
-	vacrelstats->useindex = (nindexes > 0 &&
-							 params->index_cleanup == VACOPT_TERNARY_ENABLED);
+	vacrelstats->hasindex = (nindexes > 0);
 
 	/*
 	 * Setup error traceback support for ereport().  The idea is to set up an
@@ -763,6 +804,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	BlockNumber empty_pages,
 				vacuumed_pages,
 				next_fsm_block_to_vacuum;
+	int			maxdeadtups = 0;	/* maximum # of dead tuples in a single page */
 	double		num_tuples,		/* total number of nonremovable tuples */
 				live_tuples,	/* live tuples (reltuples estimate) */
 				tups_vacuumed,	/* tuples cleaned up by vacuum */
@@ -811,14 +853,24 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	vacrelstats->nonempty_pages = 0;
 	vacrelstats->latestRemovedXid = InvalidTransactionId;
 
+	/*
+	 * Index vacuum cleanup is enabled if index cleanup is not disabled,
+	 * i.e., it's true when the option is either default or enabled.
+	 */
+	vacrelstats->indexcleanup_requested =
+		(params->index_cleanup != VACOPT_TERNARY_DISABLED);
+
 	vistest = GlobalVisTestFor(onerel);
 
 	/*
 	 * Initialize state for a parallel vacuum.  As of now, only one worker can
 	 * be used for an index, so we invoke parallelism only if there are at
-	 * least two indexes on a table.
+	 * least two indexes on a table.  When index cleanup is disabled, we
+	 * disable parallel vacuum, since index bulk-deletion is likely to be
+	 * a no-op.
 	 */
-	if (params->nworkers >= 0 && vacrelstats->useindex && nindexes > 1)
+	if (params->nworkers >= 0 && nindexes > 1 &&
+		params->index_cleanup != VACOPT_TERNARY_DISABLED)
 	{
 		/*
 		 * Since parallel workers cannot access data in temporary tables, we
@@ -846,7 +898,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	 * initialized.
 	 */
 	if (!ParallelVacuumIsActive(lps))
-		lazy_space_alloc(vacrelstats, nblocks);
+		lazy_space_alloc(vacrelstats, nblocks, nindexes);
 
 	dead_tuples = vacrelstats->dead_tuples;
 	frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
@@ -1050,19 +1102,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				vmbuffer = InvalidBuffer;
 			}
 
-			/* Work on all the indexes, then the heap */
-			lazy_vacuum_all_indexes(onerel, Irel, indstats,
-									vacrelstats, lps, nindexes);
-
-			/* Remove tuples from heap */
-			lazy_vacuum_heap(onerel, vacrelstats);
-
-			/*
-			 * Forget the now-vacuumed tuples, and press on, but be careful
-			 * not to reset latestRemovedXid since we want that value to be
-			 * valid.
-			 */
-			dead_tuples->num_tuples = 0;
+			/* Vacuum the table and its indexes */
+			lazy_vacuum_table_and_indexes(onerel, params, vacrelstats,
+										  Irel, nindexes, indstats,
+										  lps, &maxdeadtups);
 
 			/*
 			 * Vacuum the Free Space Map to make newly-freed space visible on
@@ -1512,32 +1555,16 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 		/*
 		 * If there are no indexes we can vacuum the page right now instead of
-		 * doing a second scan. Also we don't do that but forget dead tuples
-		 * when index cleanup is disabled.
+		 * doing a second scan.
 		 */
-		if (!vacrelstats->useindex && dead_tuples->num_tuples > 0)
+		if (!vacrelstats->hasindex && dead_tuples->num_tuples > 0)
 		{
-			if (nindexes == 0)
-			{
-				/* Remove tuples from heap if the table has no index */
-				lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
-				vacuumed_pages++;
-				has_dead_tuples = false;
-			}
-			else
-			{
-				/*
-				 * Here, we have indexes but index cleanup is disabled.
-				 * Instead of vacuuming the dead tuples on the heap, we just
-				 * forget them.
-				 *
-				 * Note that vacrelstats->dead_tuples could have tuples which
-				 * became dead after HOT-pruning but are not marked dead yet.
-				 * We do not process them because it's a very rare condition,
-				 * and the next vacuum will process them anyway.
-				 */
-				Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
-			}
+			Assert(nindexes == 0);
+
+			/* Remove tuples from heap if the table has no index */
+			lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
+			vacuumed_pages++;
+			has_dead_tuples = false;
 
 			/*
 			 * Forget the now-vacuumed tuples, and press on, but be careful
@@ -1663,6 +1690,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		 */
 		if (dead_tuples->num_tuples == prev_dead_count)
 			RecordPageWithFreeSpace(onerel, blkno, freespace);
+		else
+			maxdeadtups = Max(maxdeadtups,
+							  dead_tuples->num_tuples - prev_dead_count);
 	}
 
 	/* report that everything is scanned and vacuumed */
@@ -1702,14 +1732,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	/* If any tuples need to be deleted, perform final vacuum cycle */
 	/* XXX put a threshold on min number of tuples here? */
 	if (dead_tuples->num_tuples > 0)
-	{
-		/* Work on all the indexes, and then the heap */
-		lazy_vacuum_all_indexes(onerel, Irel, indstats, vacrelstats,
-								lps, nindexes);
-
-		/* Remove tuples from heap */
-		lazy_vacuum_heap(onerel, vacrelstats);
-	}
+		lazy_vacuum_table_and_indexes(onerel, params, vacrelstats,
+									  Irel, nindexes, indstats,
+									  lps, &maxdeadtups);
 
 	/*
 	 * Vacuum the remainder of the Free Space Map.  We must do this whether or
@@ -1722,7 +1747,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
 
 	/* Do post-vacuum cleanup */
-	if (vacrelstats->useindex)
+	if (vacrelstats->hasindex)
 		lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
 
 	/*
@@ -1775,6 +1800,140 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	pfree(buf.data);
 }
 
+/*
+ * Remove the collected garbage tuples from the table and its indexes.
+ */
+static void
+lazy_vacuum_table_and_indexes(Relation onerel, VacuumParams *params,
+							  LVRelStats *vacrelstats, Relation *Irel,
+							  int nindexes, IndexBulkDeleteResult **indstats,
+							  LVParallelState *lps, int *maxdeadtups)
+{
+	/*
+	 * Choose the vacuum strategy for this vacuum cycle.
+	 * choose_vacuum_strategy() stores its decision in
+	 * vacrelstats->vacuum_heap.
+	 */
+	choose_vacuum_strategy(onerel, vacrelstats, params, Irel, nindexes,
+						   *maxdeadtups);
+
+	/* Work on all the indexes, then the heap */
+	lazy_vacuum_all_indexes(onerel, Irel, indstats, vacrelstats, lps,
+							nindexes);
+
+	if (vacrelstats->vacuum_heap)
+	{
+		/* Remove tuples from heap */
+		lazy_vacuum_heap(onerel, vacrelstats);
+	}
+	else
+	{
+		/*
+		 * Here, we skip heap vacuum in this cycle.
+		 *
+		 * Note that vacrelstats->dead_tuples could have tuples which
+		 * became dead after HOT-pruning but are not marked dead yet.
+		 * We do not process them because it's a very rare condition,
+		 * and the next vacuum will process them anyway.
+		 */
+		Assert(params->index_cleanup != VACOPT_TERNARY_ENABLED);
+	}
+
+	/*
+	 * Forget the now-vacuumed tuples, and press on, but be careful
+	 * not to reset latestRemovedXid since we want that value to be
+	 * valid.
+	 */
+	vacrelstats->dead_tuples->num_tuples = 0;
+	*maxdeadtups = 0;
+}
+
+/*
+ * Decide whether or not we remove the collected garbage tuples from the
+ * heap. The decision is stored in vacrelstats->vacuum_heap. ndeaditems
+ * is the maximum number of LP_DEAD items on any one heap page encountered
+ * during the heap scan.
+ */
+static void
+choose_vacuum_strategy(Relation onerel, LVRelStats *vacrelstats,
+					   VacuumParams *params, Relation *Irel, int nindexes,
+					   int ndeaditems)
+{
+	bool		vacuum_heap = true;
+	int			i;
+
+	/*
+	 * Ask each index for its vacuum strategy, and save the answers. If
+	 * even one index returns 'none', we can skip heap vacuum in this
+	 * cycle, at least from the index strategies' point of view. This
+	 * decision may be overridden by other factors; see below.
+	 */
+	for (i = 0; i < nindexes; i++)
+	{
+		IndexVacuumInfo ivinfo;
+
+		ivinfo.index = Irel[i];
+		ivinfo.message_level = elevel;
+
+		/* Save the returned value */
+		vacrelstats->ivstrategies[i] = index_vacuum_strategy(&ivinfo, params);
+
+		if (vacrelstats->ivstrategies[i] == INDEX_VACUUM_STRATEGY_NONE)
+			vacuum_heap = false;
+	}
+
+	/* An explicitly specified index cleanup option overrides the decision */
+	if (params->index_cleanup == VACOPT_TERNARY_ENABLED)
+		vacuum_heap = true;
+	else if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
+		vacuum_heap = false;
+	else if (!vacuum_heap)
+	{
+		Size freespace = RelationGetTargetPageFreeSpace(onerel,
+														HEAP_DEFAULT_FILLFACTOR);
+		int ndeaditems_limit = (int) ((freespace / sizeof(ItemIdData)) *
+									  DEAD_ITEMS_ON_PAGE_LIMIT_SAFETY_RATIO);
+
+		/*
+		 * Check whether we need to delete the collected garbage from the
+		 * heap, from the heap's point of view.
+		 *
+		 * The test against ndeaditems_limit uses the maximum number of
+		 * LP_DEAD items on any one heap page encountered during the heap
+		 * scan by the caller.  The general idea here is to preserve the
+		 * original pristine state of the table when it is subject to
+		 * constant non-HOT updates and the heap fill factor has been
+		 * reduced from its default.
+		 *
+		 * To calculate how many LP_DEAD line pointers can be stored in the
+		 * space of a heap page left free by fillfactor, we need to consider
+		 * two aspects: the space left by fillfactor and the maximum number
+		 * of heap tuples per page, i.e., MaxHeapTuplesPerPage.
+		 * ndeaditems_limit is calculated using the free space left by
+		 * fillfactor -- from the space perspective, we can fit
+		 * (freespace / sizeof(ItemIdData)) LP_DEAD items on a heap page
+		 * before they start to "overflow".  However, we cannot always store
+		 * the calculated number of LP_DEAD line pointers because of
+		 * MaxHeapTuplesPerPage -- the total number of line pointers in a
+		 * heap page cannot exceed MaxHeapTuplesPerPage.  For example, with
+		 * small tuples we can store more tuples in a heap page, consuming
+		 * more line pointers to store heap tuples.  So leaving line
+		 * pointers as LP_DEAD could consume line pointers that are supposed
+		 * to store heap tuples, resulting in an overflow.
+		 *
+		 * The calculation below, however, considers only the former aspect,
+		 * the space, because (1) MaxHeapTuplesPerPage is defined with
+		 * accumulating a certain number of LP_DEAD line pointers in mind,
+		 * and (2) it simplifies the calculation.  Thanks to (1), we don't
+		 * need to consider the upper bound in most cases.
+		 */
+		if (ndeaditems > ndeaditems_limit)
+			vacuum_heap = true;
+	}
+
+	vacrelstats->vacuum_heap = vacuum_heap;
+}
+
 /*
  *	lazy_vacuum_all_indexes() -- vacuum all indexes of relation.
  *
@@ -1818,7 +1977,8 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
-							  vacrelstats->old_live_tuples, vacrelstats);
+							  vacrelstats->old_live_tuples, vacrelstats,
+							  vacrelstats->ivstrategies[idx]);
 	}
 
 	/* Increase and report the number of index scans */
@@ -1827,7 +1987,6 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 								 vacrelstats->num_index_scans);
 }
 
-
 /*
  *	lazy_vacuum_heap() -- second pass over the heap
  *
@@ -2092,7 +2251,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 							 LVRelStats *vacrelstats, LVParallelState *lps,
 							 int nindexes)
 {
-	int			nworkers;
+	int			nworkers = 0;
 
 	Assert(!IsParallelWorker());
 	Assert(ParallelVacuumIsActive(lps));
@@ -2108,10 +2267,32 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 			nworkers = lps->nindexes_parallel_cleanup;
 	}
 	else
-		nworkers = lps->nindexes_parallel_bulkdel;
+	{
+		if (vacrelstats->vacuum_heap)
+			nworkers = lps->nindexes_parallel_bulkdel;
+		else
+		{
+			int i;
+
+			/*
+			 * If we don't vacuum the heap, index bulk-deletion could be
+			 * skipped, depending on the index. So we count how many indexes
+			 * will do index bulk-deletion based on their answers to
+			 * amvacuumstrategy.
+			 */
+			for (i = 0; i < nindexes; i++)
+			{
+				uint8 vacoptions = Irel[i]->rd_indam->amparallelvacuumoptions;
+
+				if ((vacoptions & VACUUM_OPTION_PARALLEL_BULKDEL) != 0 &&
+					vacrelstats->ivstrategies[i] == INDEX_VACUUM_STRATEGY_BULKDELETE)
+					nworkers++;
+			}
+		}
+	}
 
 	/* The leader process will participate */
-	nworkers--;
+	if (nworkers > 0)
+		nworkers--;
 
 	/*
 	 * It is possible that parallel context is initialized with fewer workers
@@ -2120,6 +2301,10 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	 */
 	nworkers = Min(nworkers, lps->pcxt->nworkers);
 
+	/* Copy the information to the shared state */
+	lps->lvshared->vacuum_heap = vacrelstats->vacuum_heap;
+	lps->lvshared->indexcleanup_requested = vacrelstats->indexcleanup_requested;
+
 	/* Setup the shared cost-based vacuum delay and launch workers */
 	if (nworkers > 0)
 	{
@@ -2254,7 +2439,8 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 
 		/* Do vacuum or cleanup of the index */
 		vacuum_one_index(Irel[idx], &(stats[idx]), lvshared, shared_indstats,
-						 dead_tuples, vacrelstats);
+						 dead_tuples, vacrelstats,
+						 vacrelstats->ivstrategies[idx]);
 	}
 
 	/*
@@ -2295,7 +2481,7 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 			skip_parallel_vacuum_index(Irel[i], lps->lvshared))
 			vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
 							 shared_indstats, vacrelstats->dead_tuples,
-							 vacrelstats);
+							 vacrelstats, vacrelstats->ivstrategies[i]);
 	}
 
 	/*
@@ -2315,7 +2501,8 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 static void
 vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 				 LVShared *lvshared, LVSharedIndStats *shared_indstats,
-				 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats)
+				 LVDeadTuples *dead_tuples, LVRelStats *vacrelstats,
+				 IndexVacuumStrategy ivstrat)
 {
 	IndexBulkDeleteResult *bulkdelete_res = NULL;
 
@@ -2338,7 +2525,7 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 						   lvshared->estimated_count, vacrelstats);
 	else
 		lazy_vacuum_index(indrel, stats, dead_tuples,
-						  lvshared->reltuples, vacrelstats);
+						  lvshared->reltuples, vacrelstats, ivstrat);
 
 	/*
 	 * Copy the index bulk-deletion result returned from ambulkdelete and
@@ -2429,7 +2616,8 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
  */
 static void
 lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
-				  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats)
+				  LVDeadTuples *dead_tuples, double reltuples, LVRelStats *vacrelstats,
+				  IndexVacuumStrategy ivstrat)
 {
 	IndexVacuumInfo ivinfo;
 	PGRUsage	ru0;
@@ -2443,7 +2631,9 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	ivinfo.estimated_count = true;
 	ivinfo.message_level = elevel;
 	ivinfo.num_heap_tuples = reltuples;
-	ivinfo.strategy = vac_strategy;
+	ivinfo.strategy = vac_strategy; /* buffer access strategy */
+	ivinfo.will_vacuum_heap = vacrelstats->vacuum_heap;
+	ivinfo.indvac_strategy = ivstrat; /* index vacuum strategy */
 
 	/*
 	 * Update error traceback information.
@@ -2461,11 +2651,17 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	*stats = index_bulk_delete(&ivinfo, *stats,
 							   lazy_tid_reaped, (void *) dead_tuples);
 
-	ereport(elevel,
-			(errmsg("scanned index \"%s\" to remove %d row versions",
-					vacrelstats->indname,
-					dead_tuples->num_tuples),
-			 errdetail_internal("%s", pg_rusage_show(&ru0))));
+	/*
+	 * Report the index bulk-deletion stats. If the index returned the
+	 * statistics and we will vacuum the heap, we can assume it has
+	 * done the index bulk-deletion.
+	 */
+	if (*stats && vacrelstats->vacuum_heap)
+		ereport(elevel,
+				(errmsg("scanned index \"%s\" to remove %d row versions",
+						vacrelstats->indname,
+						dead_tuples->num_tuples),
+				 errdetail_internal("%s", pg_rusage_show(&ru0))));
 
 	/* Revert to the previous phase information for error traceback */
 	restore_vacuum_error_info(vacrelstats, &saved_err_info);
@@ -2498,6 +2694,7 @@ lazy_cleanup_index(Relation indrel,
 
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
+	ivinfo.vacuumcleanup_requested = vacrelstats->indexcleanup_requested;
 
 	/*
 	 * Update error traceback information.
@@ -2844,14 +3041,14 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
  * Return the maximum number of dead tuples we can record.
  */
 static long
-compute_max_dead_tuples(BlockNumber relblocks, bool useindex)
+compute_max_dead_tuples(BlockNumber relblocks, bool hasindex)
 {
 	long		maxtuples;
 	int			vac_work_mem = IsAutoVacuumWorkerProcess() &&
 	autovacuum_work_mem != -1 ?
 	autovacuum_work_mem : maintenance_work_mem;
 
-	if (useindex)
+	if (hasindex)
 	{
 		maxtuples = MAXDEADTUPLES(vac_work_mem * 1024L);
 		maxtuples = Min(maxtuples, INT_MAX);
@@ -2876,18 +3073,21 @@ compute_max_dead_tuples(BlockNumber relblocks, bool useindex)
  * See the comments at the head of this file for rationale.
  */
 static void
-lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
+lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks,
+				 int nindexes)
 {
 	LVDeadTuples *dead_tuples = NULL;
 	long		maxtuples;
 
-	maxtuples = compute_max_dead_tuples(relblocks, vacrelstats->useindex);
+	maxtuples = compute_max_dead_tuples(relblocks, vacrelstats->hasindex);
 
 	dead_tuples = (LVDeadTuples *) palloc(SizeOfDeadTuples(maxtuples));
 	dead_tuples->num_tuples = 0;
 	dead_tuples->max_tuples = (int) maxtuples;
 
 	vacrelstats->dead_tuples = dead_tuples;
+	vacrelstats->ivstrategies =
+		(IndexVacuumStrategy *) palloc0(SizeOfIndVacStrategies(nindexes));
 }
 
 /*
@@ -3223,10 +3423,12 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
 	LVDeadTuples *dead_tuples;
 	BufferUsage *buffer_usage;
 	WalUsage   *wal_usage;
+	IndexVacuumStrategy *ivstrats;
 	bool	   *can_parallel_vacuum;
 	long		maxtuples;
 	Size		est_shared;
 	Size		est_deadtuples;
+	Size		est_ivstrategies;
 	int			nindexes_mwm = 0;
 	int			parallel_workers = 0;
 	int			querylen;
@@ -3320,6 +3522,13 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
 						   mul_size(sizeof(WalUsage), pcxt->nworkers));
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 
+	/*
+	 * Estimate space for IndexVacuumStrategy -- PARALLEL_VACUUM_KEY_IND_STRATEGY.
+	 */
+	est_ivstrategies = MAXALIGN(SizeOfIndVacStrategies(nindexes));
+	shm_toc_estimate_chunk(&pcxt->estimator, est_ivstrategies);
+	shm_toc_estimate_keys(&pcxt->estimator, 1);
+
 	/* Finally, estimate PARALLEL_VACUUM_KEY_QUERY_TEXT space */
 	if (debug_query_string)
 	{
@@ -3372,6 +3581,11 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
 	shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_WAL_USAGE, wal_usage);
 	lps->wal_usage = wal_usage;
 
+	/* Allocate space for each index strategy */
+	ivstrats = shm_toc_allocate(pcxt->toc, est_ivstrategies);
+	shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_IND_STRATEGY, ivstrats);
+	vacrelstats->ivstrategies = ivstrats;
+
 	/* Store query string for workers */
 	if (debug_query_string)
 	{
@@ -3507,6 +3721,7 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	Relation   *indrels;
 	LVShared   *lvshared;
 	LVDeadTuples *dead_tuples;
+	IndexVacuumStrategy *ivstrats;
 	BufferUsage *buffer_usage;
 	WalUsage   *wal_usage;
 	int			nindexes;
@@ -3548,6 +3763,10 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 												  PARALLEL_VACUUM_KEY_DEAD_TUPLES,
 												  false);
 
+	/* Set vacuum strategy space */
+	ivstrats = shm_toc_lookup(toc, PARALLEL_VACUUM_KEY_IND_STRATEGY, false);
+	vacrelstats.ivstrategies = ivstrats;
+
 	/* Set cost-based vacuum delay */
 	VacuumCostActive = (VacuumCostDelay > 0);
 	VacuumCostBalance = 0;
@@ -3573,6 +3792,9 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	vacrelstats.indname = NULL;
 	vacrelstats.phase = VACUUM_ERRCB_PHASE_UNKNOWN; /* Not yet processing */
 
+	vacrelstats.vacuum_heap = lvshared->vacuum_heap;
+	vacrelstats.indexcleanup_requested = lvshared->indexcleanup_requested;
+
 	/* Setup error traceback support for ereport() */
 	errcallback.callback = vacuum_error_callback;
 	errcallback.arg = &vacrelstats;
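
To make the ndeaditems_limit arithmetic in choose_vacuum_strategy()
concrete, here is a rough worked example.  The numbers assume the default
8192-byte block size, a table fillfactor of 90, and 4-byte line pointers
(sizeof(ItemIdData)); none of this code is part of the patch.

#include <stdio.h>

#define BLCKSZ			8192	/* default block size */
#define FILLFACTOR		90		/* assumed table fillfactor */
#define SIZEOF_ITEMID	4		/* sizeof(ItemIdData) */
#define SAFETY_RATIO	0.7		/* DEAD_ITEMS_ON_PAGE_LIMIT_SAFETY_RATIO */

int
main(void)
{
	/* free space a heap page reserves with fillfactor 90: 819 bytes */
	int			freespace = BLCKSZ * (100 - FILLFACTOR) / 100;

	/* LP_DEAD items fitting in that space, with the safety ratio applied */
	int			limit = (int) ((freespace / SIZEOF_ITEMID) * SAFETY_RATIO);

	printf("ndeaditems_limit = %d\n", limit);	/* prints 142 */
	return 0;
}

So under these assumptions, once more than 142 LP_DEAD items accumulate on
any one heap page, choose_vacuum_strategy() opts to vacuum the heap even if
every index answered 'none'.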
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 3d2dbed708..171ba5c2fa 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -678,6 +678,28 @@ index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap)
 	return ntids;
 }
 
+/* ----------------
+ *		index_vacuum_strategy - ask the index for its vacuum strategy
+ *
+ * This routine is called just before vacuuming the heap.  It returns an
+ * IndexVacuumStrategy value telling lazy vacuum whether the index intends
+ * to do index deletion.
+ * ----------------
+ */
+IndexVacuumStrategy
+index_vacuum_strategy(IndexVacuumInfo *info, struct VacuumParams *params)
+{
+	Relation	indexRelation = info->index;
+
+	RELATION_CHECKS;
+
+	/* amvacuumstrategy is optional; assume bulk-deletion by default */
+	if (indexRelation->rd_indam->amvacuumstrategy == NULL)
+		return INDEX_VACUUM_STRATEGY_BULKDELETE;
+
+	return indexRelation->rd_indam->amvacuumstrategy(info, params);
+}
+
 /* ----------------
  *		index_bulk_delete - do mass deletion of index entries
  *
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 289bd3c15d..e00e5fe0a4 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -133,6 +133,7 @@ bthandler(PG_FUNCTION_ARGS)
 	amroutine->ambuild = btbuild;
 	amroutine->ambuildempty = btbuildempty;
 	amroutine->aminsert = btinsert;
+	amroutine->amvacuumstrategy = btvacuumstrategy;
 	amroutine->ambulkdelete = btbulkdelete;
 	amroutine->amvacuumcleanup = btvacuumcleanup;
 	amroutine->amcanreturn = btcanreturn;
@@ -822,6 +823,18 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 		 */
 		result = true;
 	}
+	else if (!info->vacuumcleanup_requested)
+	{
+		/*
+		 * Skip cleanup if INDEX_CLEANUP is set to false, even if there might
+		 * be deleted pages that can be recycled. If INDEX_CLEANUP remains
+		 * disabled, recyclable pages could be left unrecycled until XID
+		 * wraparound. But in practice that's not so harmful, since such
+		 * workloads don't need to delete and recycle pages in any case, and
+		 * deletion of btree index pages is relatively rare.
+		 */
+		result = false;
+	}
 	else if (TransactionIdIsValid(metad->btm_oldest_btpo_xact) &&
 			 GlobalVisCheckRemovableXid(NULL, metad->btm_oldest_btpo_xact))
 	{
@@ -864,6 +877,19 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 	return result;
 }
 
+/*
+ * Choose the vacuum strategy. Do bulk-deletion unless index cleanup
+ * is set to off.
+ */
+IndexVacuumStrategy
+btvacuumstrategy(IndexVacuumInfo *info, VacuumParams *params)
+{
+	if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
+		return INDEX_VACUUM_STRATEGY_NONE;
+	else
+		return INDEX_VACUUM_STRATEGY_BULKDELETE;
+}
+
 /*
  * Bulk deletion of all index entries pointing to a set of heap tuples.
  * The set of target tuples is specified via a callback routine that tells
@@ -878,6 +904,14 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	Relation	rel = info->index;
 	BTCycleId	cycleid;
 
+	/*
+	 * Skip deleting index entries if the corresponding heap tuples will
+	 * not be deleted and this index opted to skip bulk-deletion.
+	 */
+	if (!info->will_vacuum_heap &&
+		info->indvac_strategy == INDEX_VACUUM_STRATEGY_NONE)
+		return stats;
+
 	/* allocate stats if first time through, else re-use existing struct */
 	if (stats == NULL)
 		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index d8b1815061..7b2313590a 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -66,6 +66,7 @@ spghandler(PG_FUNCTION_ARGS)
 	amroutine->ambuild = spgbuild;
 	amroutine->ambuildempty = spgbuildempty;
 	amroutine->aminsert = spginsert;
+	amroutine->amvacuumstrategy = spgvacuumstrategy;
 	amroutine->ambulkdelete = spgbulkdelete;
 	amroutine->amvacuumcleanup = spgvacuumcleanup;
 	amroutine->amcanreturn = spgcanreturn;
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 0d02a02222..f44043d94f 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -894,6 +894,19 @@ spgvacuumscan(spgBulkDeleteState *bds)
 	bds->stats->pages_free = bds->stats->pages_deleted;
 }
 
+/*
+ * Choose the vacuum strategy. Do bulk-deletion unless index cleanup
+ * is set to off.
+ */
+IndexVacuumStrategy
+spgvacuumstrategy(IndexVacuumInfo *info, VacuumParams *params)
+{
+	if (params->index_cleanup == VACOPT_TERNARY_DISABLED)
+		return INDEX_VACUUM_STRATEGY_NONE;
+	else
+		return INDEX_VACUUM_STRATEGY_BULKDELETE;
+}
+
 /*
  * Bulk deletion of all index entries pointing to a set of heap tuples.
  * The set of target tuples is specified via a callback routine that tells
@@ -907,6 +920,13 @@ spgbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 {
 	spgBulkDeleteState bds;
 
+	/*
+	 * Skip deleting index entries if the corresponding heap tuples will
+	 * not be deleted and this index opted to skip bulk-deletion.
+	 */
+	if (!info->will_vacuum_heap &&
+		info->indvac_strategy == INDEX_VACUUM_STRATEGY_NONE)
+		return stats;
+
 	/* allocate stats if first time through, else re-use existing struct */
 	if (stats == NULL)
 		stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
@@ -937,8 +957,11 @@ spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 {
 	spgBulkDeleteState bds;
 
-	/* No-op in ANALYZE ONLY mode */
-	if (info->analyze_only)
+	/*
+	 * No-op in ANALYZE ONLY mode or when the user requests disabling
+	 * index cleanup.
+	 */
+	if (info->analyze_only || !info->vacuumcleanup_requested)
 		return stats;
 
 	/*
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index b8cd35e995..30b48d6ccb 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -3401,6 +3401,8 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
 	ivinfo.message_level = DEBUG2;
 	ivinfo.num_heap_tuples = heapRelation->rd_rel->reltuples;
 	ivinfo.strategy = NULL;
+	ivinfo.will_vacuum_heap = true;
+	ivinfo.indvac_strategy = INDEX_VACUUM_STRATEGY_BULKDELETE;
 
 	/*
 	 * Encode TIDs as int8 values for the sort, rather than directly sorting
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 7295cf0215..111addbd6c 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -668,6 +668,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
 			ivinfo.message_level = elevel;
 			ivinfo.num_heap_tuples = onerel->rd_rel->reltuples;
 			ivinfo.strategy = vac_strategy;
+			ivinfo.vacuumcleanup_requested = true;
 
 			stats = index_vacuum_cleanup(&ivinfo, NULL);
 
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 462f9a0f82..4ab20b77e6 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -1870,17 +1870,20 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params)
 	onerelid = onerel->rd_lockInfo.lockRelId;
 	LockRelationIdForSession(&onerelid, lmode);
 
-	/* Set index cleanup option based on reloptions if not yet */
-	if (params->index_cleanup == VACOPT_TERNARY_DEFAULT)
-	{
-		if (onerel->rd_options == NULL ||
-			((StdRdOptions *) onerel->rd_options)->vacuum_index_cleanup)
-			params->index_cleanup = VACOPT_TERNARY_ENABLED;
-		else
-			params->index_cleanup = VACOPT_TERNARY_DISABLED;
-	}
+	/*
+	 * Set the index cleanup option if the vacuum_index_cleanup reloption is
+	 * set.  Otherwise we leave it as 'default', which means we choose the
+	 * vacuum strategy based on the state of the table and its indexes.  See
+	 * choose_vacuum_strategy().
+	 */
+	if (params->index_cleanup == VACOPT_TERNARY_DEFAULT &&
+		onerel->rd_options != NULL)
+		params->index_cleanup =
+			((StdRdOptions *) onerel->rd_options)->vacuum_index_cleanup;
 
-	/* Set truncate option based on reloptions if not yet */
+	/*
+	 * Set the truncate option based on reloptions if not yet set.  The
+	 * truncate option is true by default.
+	 */
 	if (params->truncate == VACOPT_TERNARY_DEFAULT)
 	{
 		if (onerel->rd_options == NULL ||
diff --git a/src/include/access/amapi.h b/src/include/access/amapi.h
index d357ebb559..548f2033a4 100644
--- a/src/include/access/amapi.h
+++ b/src/include/access/amapi.h
@@ -22,8 +22,9 @@
 struct PlannerInfo;
 struct IndexPath;
 
-/* Likewise, this file shouldn't depend on execnodes.h. */
+/* Likewise, this file shouldn't depend on execnodes.h or vacuum.h. */
 struct IndexInfo;
+struct VacuumParams;
 
 
 /*
@@ -112,6 +113,9 @@ typedef bool (*aminsert_function) (Relation indexRelation,
 								   IndexUniqueCheck checkUnique,
 								   bool indexUnchanged,
 								   struct IndexInfo *indexInfo);
+/* vacuum strategy */
+typedef IndexVacuumStrategy (*amvacuumstrategy_function) (IndexVacuumInfo *info,
+														  struct VacuumParams *params);
 
 /* bulk delete */
 typedef IndexBulkDeleteResult *(*ambulkdelete_function) (IndexVacuumInfo *info,
@@ -259,6 +263,7 @@ typedef struct IndexAmRoutine
 	ambuild_function ambuild;
 	ambuildempty_function ambuildempty;
 	aminsert_function aminsert;
+	amvacuumstrategy_function amvacuumstrategy;
 	ambulkdelete_function ambulkdelete;
 	amvacuumcleanup_function amvacuumcleanup;
 	amcanreturn_function amcanreturn;	/* can be NULL */
diff --git a/src/include/access/genam.h b/src/include/access/genam.h
index 0eab1508d3..f164ec1a54 100644
--- a/src/include/access/genam.h
+++ b/src/include/access/genam.h
@@ -21,8 +21,9 @@
 #include "utils/relcache.h"
 #include "utils/snapshot.h"
 
-/* We don't want this file to depend on execnodes.h. */
+/* We don't want this file to depend on execnodes.h or vacuum.h. */
 struct IndexInfo;
+struct VacuumParams;
 
 /*
  * Struct for statistics returned by ambuild
@@ -33,8 +34,17 @@ typedef struct IndexBuildResult
 	double		index_tuples;	/* # of tuples inserted into index */
 } IndexBuildResult;
 
+/* Result value for amvacuumstrategy */
+typedef enum IndexVacuumStrategy
+{
+	INDEX_VACUUM_STRATEGY_NONE,			/* No-op, skip bulk-deletion in this
+										 * vacuum cycle */
+	INDEX_VACUUM_STRATEGY_BULKDELETE	/* Do ambulkdelete */
+} IndexVacuumStrategy;
+
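+/*
+ * For illustration only: a minimal amvacuumstrategy implementation for a
+ * hypothetical "foo" index AM that always opts in to bulk-deletion might
+ * look like this:
+ *
+ *		IndexVacuumStrategy
+ *		foovacuumstrategy(IndexVacuumInfo *info, struct VacuumParams *params)
+ *		{
+ *			return INDEX_VACUUM_STRATEGY_BULKDELETE;
+ *		}
+ */
+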
 /*
- * Struct for input arguments passed to ambulkdelete and amvacuumcleanup
+ * Struct for input arguments passed to amvacuumstrategy, ambulkdelete
+ * and amvacuumcleanup
  *
  * num_heap_tuples is accurate only when estimated_count is false;
  * otherwise it's just an estimate (currently, the estimate is the
@@ -50,6 +60,26 @@ typedef struct IndexVacuumInfo
 	int			message_level;	/* ereport level for progress messages */
 	double		num_heap_tuples;	/* tuples remaining in heap */
 	BufferAccessStrategy strategy;	/* access strategy for reads */
+
+	/*
+	 * True if lazy vacuum will delete the collected garbage tuples from
+	 * the heap.  If it's false, the index AM can safely skip index
+	 * bulk-deletion.  This field is used only for ambulkdelete.
+	 */
+	bool		will_vacuum_heap;
+
+	/*
+	 * The answer returned by amvacuumstrategy, which is called before
+	 * executing ambulkdelete.  This field is used only for ambulkdelete.
+	 */
+	IndexVacuumStrategy indvac_strategy;
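+
+	/*
+	 * For illustration only: an ambulkdelete implementation might honor
+	 * this answer with a guard such as
+	 *
+	 *		if (info->indvac_strategy == INDEX_VACUUM_STRATEGY_NONE)
+	 *			return stats;		(skip bulk-deletion in this cycle)
+	 *
+	 * much as spgbulkdelete returns early when will_vacuum_heap is false.
+	 */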
+
+	/*
+	 * True if amvacuumcleanup is requested by lazy vacuum.  If false, the
+	 * index AM can skip index cleanup; this happens when the INDEX_CLEANUP
+	 * vacuum option is set to false.  This field is used only for
+	 * amvacuumcleanup.
+	 */
+	bool		vacuumcleanup_requested;
 } IndexVacuumInfo;
 
 /*
@@ -174,6 +204,8 @@ extern bool index_getnext_slot(IndexScanDesc scan, ScanDirection direction,
 							   struct TupleTableSlot *slot);
 extern int64 index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap);
 
+extern IndexVacuumStrategy index_vacuum_strategy(IndexVacuumInfo *info,
+												 struct VacuumParams *params);
 extern IndexBulkDeleteResult *index_bulk_delete(IndexVacuumInfo *info,
 												IndexBulkDeleteResult *stats,
 												IndexBulkDeleteCallback callback,
diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h
index 670a40b4be..5c48a48917 100644
--- a/src/include/access/gin_private.h
+++ b/src/include/access/gin_private.h
@@ -397,6 +397,8 @@ extern int64 gingetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
 extern void ginInitConsistentFunction(GinState *ginstate, GinScanKey key);
 
 /* ginvacuum.c */
+extern IndexVacuumStrategy ginvacuumstrategy(IndexVacuumInfo *info,
+											 struct VacuumParams *params);
 extern IndexBulkDeleteResult *ginbulkdelete(IndexVacuumInfo *info,
 											IndexBulkDeleteResult *stats,
 											IndexBulkDeleteCallback callback,
diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h
index 553d364e2d..303a18da4d 100644
--- a/src/include/access/gist_private.h
+++ b/src/include/access/gist_private.h
@@ -533,6 +533,8 @@ extern void gistMakeUnionKey(GISTSTATE *giststate, int attno,
 extern XLogRecPtr gistGetFakeLSN(Relation rel);
 
 /* gistvacuum.c */
+extern IndexVacuumStrategy gistvacuumstrategy(IndexVacuumInfo *info,
+											  struct VacuumParams *params);
 extern IndexBulkDeleteResult *gistbulkdelete(IndexVacuumInfo *info,
 											 IndexBulkDeleteResult *stats,
 											 IndexBulkDeleteCallback callback,
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 1cce865be2..4c7e064708 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -372,6 +372,8 @@ extern IndexScanDesc hashbeginscan(Relation rel, int nkeys, int norderbys);
 extern void hashrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
 					   ScanKey orderbys, int norderbys);
 extern void hashendscan(IndexScanDesc scan);
+extern IndexVacuumStrategy hashvacuumstrategy(IndexVacuumInfo *info,
+											  struct VacuumParams *params);
 extern IndexBulkDeleteResult *hashbulkdelete(IndexVacuumInfo *info,
 											 IndexBulkDeleteResult *stats,
 											 IndexBulkDeleteCallback callback,
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index 7c62852e7f..9615194db6 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -563,17 +563,24 @@ do { \
 /*
  * MaxHeapTuplesPerPage is an upper bound on the number of tuples that can
  * fit on one heap page.  (Note that indexes could have more, because they
- * use a smaller tuple header.)  We arrive at the divisor because each tuple
- * must be maxaligned, and it must have an associated line pointer.
+ * use a smaller tuple header.)
  *
- * Note: with HOT, there could theoretically be more line pointers (not actual
- * tuples) than this on a heap page.  However we constrain the number of line
- * pointers to this anyway, to avoid excessive line-pointer bloat and not
- * require increases in the size of work arrays.
+ * We used to constrain the number of line pointers to avoid excessive
+ * line-pointer bloat and to avoid increasing the size of work arrays,
+ * computing the divisor from the maxaligned heap tuple header size.  But
+ * now that the index vacuum strategy has entered the picture, accumulating
+ * LP_DEAD line pointers on a heap page has value: it allows index
+ * bulk-deletion to be skipped.  So we relaxed the limit to admit a certain
+ * number of line pointers on a heap page that have no associated heap
+ * tuple, computing the divisor from 1 MAXALIGN() quantum instead of the
+ * maxaligned heap tuple header size, which is 3 MAXALIGN() quantums.
+ *
+ * Note that increasing this value also affects the TID bitmap; there is a
+ * risk of introducing a performance regression for bitmap scans.
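+ *
+ * For example, assuming the common configuration of 8 kB blocks, 8-byte
+ * maximum alignment, a 24-byte page header, and 4-byte line pointers, the
+ * limit becomes (8192 - 24) / (8 + 4) = 680, up from
+ * (8192 - 24) / (24 + 4) = 291 with the old divisor.  This is why the
+ * offset numbers in the test_ginpostinglist expected output change from
+ * 291 to 680.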
  */
 #define MaxHeapTuplesPerPage	\
 	((int) ((BLCKSZ - SizeOfPageHeaderData) / \
-			(MAXALIGN(SizeofHeapTupleHeader) + sizeof(ItemIdData))))
+			(MAXIMUM_ALIGNOF + sizeof(ItemIdData))))
 
 /*
  * MaxAttrSize is a somewhat arbitrary upper limit on the declared size of
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index cad4f2bdeb..ba120d4a80 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -1011,6 +1011,8 @@ extern void btparallelrescan(IndexScanDesc scan);
 extern void btendscan(IndexScanDesc scan);
 extern void btmarkpos(IndexScanDesc scan);
 extern void btrestrpos(IndexScanDesc scan);
+extern IndexVacuumStrategy btvacuumstrategy(IndexVacuumInfo *info,
+											struct VacuumParams *params);
 extern IndexBulkDeleteResult *btbulkdelete(IndexVacuumInfo *info,
 										   IndexBulkDeleteResult *stats,
 										   IndexBulkDeleteCallback callback,
diff --git a/src/include/access/spgist.h b/src/include/access/spgist.h
index 2eb2f421a8..f591b21ef1 100644
--- a/src/include/access/spgist.h
+++ b/src/include/access/spgist.h
@@ -212,6 +212,8 @@ extern bool spggettuple(IndexScanDesc scan, ScanDirection dir);
 extern bool spgcanreturn(Relation index, int attno);
 
 /* spgvacuum.c */
+extern IndexVacuumStrategy spgvacuumstrategy(IndexVacuumInfo *info,
+											 struct VacuumParams *params);
 extern IndexBulkDeleteResult *spgbulkdelete(IndexVacuumInfo *info,
 											IndexBulkDeleteResult *stats,
 											IndexBulkDeleteCallback callback,
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 191cbbd004..f2590c3b6e 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -21,6 +21,7 @@
 #include "parser/parse_node.h"
 #include "storage/buf.h"
 #include "storage/lock.h"
+#include "utils/rel.h"
 #include "utils/relcache.h"
 
 /*
@@ -184,19 +185,6 @@ typedef struct VacAttrStats
 #define VACOPT_SKIPTOAST 0x40	/* don't process the TOAST table, if any */
 #define VACOPT_DISABLE_PAGE_SKIPPING 0x80	/* don't skip any pages */
 
-/*
- * A ternary value used by vacuum parameters.
- *
- * DEFAULT value is used to determine the value based on other
- * configurations, e.g. reloptions.
- */
-typedef enum VacOptTernaryValue
-{
-	VACOPT_TERNARY_DEFAULT = 0,
-	VACOPT_TERNARY_DISABLED,
-	VACOPT_TERNARY_ENABLED,
-} VacOptTernaryValue;
-
 /*
  * Parameters customizing behavior of VACUUM and ANALYZE.
  *
@@ -216,8 +204,10 @@ typedef struct VacuumParams
 	int			log_min_duration;	/* minimum execution threshold in ms at
 									 * which  verbose logs are activated, -1
 									 * to use default */
-	VacOptTernaryValue index_cleanup;	/* Do index vacuum and cleanup,
-										 * default value depends on reloptions */
+	VacOptTernaryValue index_cleanup;	/* Do index vacuum and cleanup.  In
+										 * default mode, the decision is
+										 * based on multiple factors; see
+										 * choose_vacuum_strategy(). */
 	VacOptTernaryValue truncate;	/* Truncate empty pages at the end,
 									 * default value depends on reloptions */
 
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index 10b63982c0..168dc5d466 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -295,6 +295,20 @@ typedef struct AutoVacOpts
 	float8		analyze_scale_factor;
 } AutoVacOpts;
 
+/*
+ * A ternary value used by vacuum parameters. This value is also used
+ * for VACUUM command options.
+ *
+ * DEFAULT value is used to determine the value based on other
+ * configurations, e.g. reloptions.
+ */
+typedef enum VacOptTernaryValue
+{
+	VACOPT_TERNARY_DEFAULT = 0,
+	VACOPT_TERNARY_DISABLED,
+	VACOPT_TERNARY_ENABLED,
+} VacOptTernaryValue;
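+
+/*
+ * For example, VACUUM (INDEX_CLEANUP on) results in VACOPT_TERNARY_ENABLED,
+ * VACUUM (INDEX_CLEANUP off) in VACOPT_TERNARY_DISABLED, and omitting the
+ * option leaves VACOPT_TERNARY_DEFAULT so that vacuum itself chooses.
+ */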
+
 typedef struct StdRdOptions
 {
 	int32		vl_len_;		/* varlena header (do not touch directly!) */
@@ -304,7 +318,8 @@ typedef struct StdRdOptions
 	AutoVacOpts autovacuum;		/* autovacuum-related options */
 	bool		user_catalog_table; /* use as an additional catalog relation */
 	int			parallel_workers;	/* max number of parallel workers */
-	bool		vacuum_index_cleanup;	/* enables index vacuuming and cleanup */
+	VacOptTernaryValue	vacuum_index_cleanup;	/* controls index vacuuming
+												 * and cleanup */
 	bool		vacuum_truncate;	/* enables vacuum to truncate a relation */
 } StdRdOptions;
 
diff --git a/src/test/modules/test_ginpostinglist/expected/test_ginpostinglist.out b/src/test/modules/test_ginpostinglist/expected/test_ginpostinglist.out
index 4d0beaecea..8ad3e998e1 100644
--- a/src/test/modules/test_ginpostinglist/expected/test_ginpostinglist.out
+++ b/src/test/modules/test_ginpostinglist/expected/test_ginpostinglist.out
@@ -6,11 +6,11 @@ CREATE EXTENSION test_ginpostinglist;
 SELECT test_ginpostinglist();
 NOTICE:  testing with (0, 1), (0, 2), max 14 bytes
 NOTICE:  encoded 2 item pointers to 10 bytes
-NOTICE:  testing with (0, 1), (0, 291), max 14 bytes
+NOTICE:  testing with (0, 1), (0, 680), max 14 bytes
 NOTICE:  encoded 2 item pointers to 10 bytes
-NOTICE:  testing with (0, 1), (4294967294, 291), max 14 bytes
+NOTICE:  testing with (0, 1), (4294967294, 680), max 14 bytes
 NOTICE:  encoded 1 item pointers to 8 bytes
-NOTICE:  testing with (0, 1), (4294967294, 291), max 16 bytes
+NOTICE:  testing with (0, 1), (4294967294, 680), max 16 bytes
 NOTICE:  encoded 2 item pointers to 16 bytes
  test_ginpostinglist 
 ---------------------
-- 
2.27.0

