Hi,

I've rebased the patches onto the latest master.
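
For anyone trying the patches: parallel autovacuum is controlled by the new
autovacuum_max_parallel_workers GUC together with the new per-table storage
parameter. A minimal usage sketch (the table name is just an example):

    -- cluster-wide cap on parallel autovacuum workers (SIGHUP; 0 disables)
    ALTER SYSTEM SET autovacuum_max_parallel_workers = 4;
    SELECT pg_reload_conf();

    -- per-table opt-in: -1 (the default) keeps autovacuum non-parallel,
    -- 0 computes the parallel degree from the number of indexes
    ALTER TABLE my_table SET (autovacuum_parallel_workers = 2);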

--
Best regards,
Daniil Davydov
From 57ea4c318664f6e0b72040d14e7a7d9f82d2036c Mon Sep 17 00:00:00 2001
From: Daniil Davidov <[email protected]>
Date: Mon, 18 Aug 2025 15:14:25 +0700
Subject: [PATCH v12 2/4] Logging for parallel autovacuum

---
 src/backend/access/heap/vacuumlazy.c  | 27 +++++++++++++++++++++++++--
 src/backend/commands/vacuumparallel.c | 20 ++++++++++++++------
 src/include/commands/vacuum.h         | 16 ++++++++++++++--
 src/tools/pgindent/typedefs.list      |  1 +
 4 files changed, 54 insertions(+), 10 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index d2b031fdd06..d364cde5fe5 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -347,6 +347,12 @@ typedef struct LVRelState
 
 	/* Instrumentation counters */
 	int			num_index_scans;
+
+	/*
+	 * Counters of planned and actually launched parallel workers across all
+	 * index scans, or NULL if these statistics are not being accumulated
+	 */
+	PVWorkersUsage *workers_usage;
 	/* Counters that follow are only for scanned_pages */
 	int64		tuples_deleted; /* # deleted from table */
 	int64		tuples_frozen;	/* # newly frozen */
@@ -700,6 +706,16 @@ heap_vacuum_rel(Relation rel, const VacuumParams params,
 		indnames = palloc(sizeof(char *) * vacrel->nindexes);
 		for (int i = 0; i < vacrel->nindexes; i++)
 			indnames[i] = pstrdup(RelationGetRelationName(vacrel->indrels[i]));
+
+		/*
+		 * Allocate space for the workers usage statistics, thereby making it
+		 * explicit that such statistics must be accumulated. For now, only
+		 * the autovacuum leader does this, because it must log them at the
+		 * end of table processing.
+		 */
+		vacrel->workers_usage = AmAutoVacuumWorkerProcess() ?
+			(PVWorkersUsage *) palloc0(sizeof(PVWorkersUsage)) :
+			NULL;
 	}
 
 	/*
@@ -1024,6 +1040,11 @@ heap_vacuum_rel(Relation rel, const VacuumParams params,
 							 vacrel->relnamespace,
 							 vacrel->relname,
 							 vacrel->num_index_scans);
+			if (vacrel->workers_usage)
+				appendStringInfo(&buf,
+								 _("workers usage statistics for all index scans: launched in total = %d, planned in total = %d\n"),
+								 vacrel->workers_usage->nlaunched,
+								 vacrel->workers_usage->nplanned);
 			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u scanned (%.2f%% of total), %u eagerly scanned\n"),
 							 vacrel->removed_pages,
 							 new_rel_pages,
@@ -2653,7 +2674,8 @@ lazy_vacuum_all_indexes(LVRelState *vacrel)
 	{
 		/* Outsource everything to parallel variant */
 		parallel_vacuum_bulkdel_all_indexes(vacrel->pvs, old_live_tuples,
-											vacrel->num_index_scans);
+											vacrel->num_index_scans,
+											vacrel->workers_usage);
 
 		/*
 		 * Do a postcheck to consider applying wraparound failsafe now.  Note
@@ -3085,7 +3107,8 @@ lazy_cleanup_all_indexes(LVRelState *vacrel)
 		/* Outsource everything to parallel variant */
 		parallel_vacuum_cleanup_all_indexes(vacrel->pvs, reltuples,
 											vacrel->num_index_scans,
-											estimated_count);
+											estimated_count,
+											vacrel->workers_usage);
 	}
 
 	/* Reset the progress counters */
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index acd53b85b1c..9a258238650 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -227,7 +227,7 @@ struct ParallelVacuumState
 static int	parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested,
 											bool *will_parallel_vacuum);
 static void parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans,
-												bool vacuum);
+												bool vacuum, PVWorkersUsage *wusage);
 static void parallel_vacuum_process_safe_indexes(ParallelVacuumState *pvs);
 static void parallel_vacuum_process_unsafe_indexes(ParallelVacuumState *pvs);
 static void parallel_vacuum_process_one_index(ParallelVacuumState *pvs, Relation indrel,
@@ -502,7 +502,7 @@ parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)
  */
 void
 parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tuples,
-									int num_index_scans)
+									int num_index_scans, PVWorkersUsage *wusage)
 {
 	Assert(!IsParallelWorker());
 
@@ -513,7 +513,7 @@ parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tup
 	pvs->shared->reltuples = num_table_tuples;
 	pvs->shared->estimated_count = true;
 
-	parallel_vacuum_process_all_indexes(pvs, num_index_scans, true);
+	parallel_vacuum_process_all_indexes(pvs, num_index_scans, true, wusage);
 }
 
 /*
@@ -521,7 +521,8 @@ parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs, long num_table_tup
  */
 void
 parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tuples,
-									int num_index_scans, bool estimated_count)
+									int num_index_scans, bool estimated_count,
+									PVWorkersUsage *wusage)
 {
 	Assert(!IsParallelWorker());
 
@@ -533,7 +534,7 @@ parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs, long num_table_tup
 	pvs->shared->reltuples = num_table_tuples;
 	pvs->shared->estimated_count = estimated_count;
 
-	parallel_vacuum_process_all_indexes(pvs, num_index_scans, false);
+	parallel_vacuum_process_all_indexes(pvs, num_index_scans, false, wusage);
 }
 
 /*
@@ -618,7 +619,7 @@ parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested,
  */
 static void
 parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scans,
-									bool vacuum)
+									bool vacuum, PVWorkersUsage *wusage)
 {
 	int			nworkers;
 	PVIndVacStatus new_status;
@@ -742,6 +743,13 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan
 									 "launched %d parallel vacuum workers for index cleanup (planned: %d)",
 									 pvs->pcxt->nworkers_launched),
 							pvs->pcxt->nworkers_launched, nworkers)));
+
+		/* Remember these values, if the caller asked us to. */
+		if (wusage != NULL)
+		{
+			wusage->nlaunched += pvs->pcxt->nworkers_launched;
+			wusage->nplanned += nworkers;
+		}
 	}
 
 	/* Vacuum the indexes that can be processed by only leader process */
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 1f3290c7fbf..90709ca3107 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -300,6 +300,16 @@ typedef struct VacDeadItemsInfo
 	int64		num_items;		/* current # of entries */
 } VacDeadItemsInfo;
 
+/*
+ * PVWorkersUsage stores the total numbers of parallel workers planned and
+ * actually launched during parallel vacuum.
+ */
+typedef struct PVWorkersUsage
+{
+	int			nlaunched;
+	int			nplanned;
+} PVWorkersUsage;
+
 /* GUC parameters */
 extern PGDLLIMPORT int default_statistics_target;	/* PGDLLIMPORT for PostGIS */
 extern PGDLLIMPORT int vacuum_freeze_min_age;
@@ -394,11 +404,13 @@ extern TidStore *parallel_vacuum_get_dead_items(ParallelVacuumState *pvs,
 extern void parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs);
 extern void parallel_vacuum_bulkdel_all_indexes(ParallelVacuumState *pvs,
 												long num_table_tuples,
-												int num_index_scans);
+												int num_index_scans,
+												PVWorkersUsage *wusage);
 extern void parallel_vacuum_cleanup_all_indexes(ParallelVacuumState *pvs,
 												long num_table_tuples,
 												int num_index_scans,
-												bool estimated_count);
+												bool estimated_count,
+												PVWorkersUsage *wusage);
 extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc);
 
 /* in commands/analyze.c */
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 43fe3bcd593..830763eb2fa 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -2372,6 +2372,7 @@ PullFilterOps
 PushFilter
 PushFilterOps
 PushFunction
+PVWorkersUsage
 PyCFunction
 PyMethodDef
 PyModuleDef
-- 
2.43.0
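
With the logging patch above applied, the autovacuum log report for a table
whose indexes were vacuumed in parallel gains one extra line; with
illustrative values it would look like:

    workers usage statistics for all index scans: launched in total = 3, planned in total = 4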

From 2217fc7b293c267ab497c84251dae31c0bfda7e9 Mon Sep 17 00:00:00 2001
From: Daniil Davidov <[email protected]>
Date: Tue, 28 Oct 2025 17:47:13 +0700
Subject: [PATCH v12] Parallel index autovacuum

---
 src/backend/access/common/reloptions.c        |  11 ++
 src/backend/commands/vacuumparallel.c         |  42 ++++-
 src/backend/postmaster/autovacuum.c           | 163 +++++++++++++++++-
 src/backend/utils/init/globals.c              |   1 +
 src/backend/utils/misc/guc.c                  |   8 +-
 src/backend/utils/misc/guc_parameters.dat     |   9 +
 src/backend/utils/misc/postgresql.conf.sample |   1 +
 src/bin/psql/tab-complete.in.c                |   1 +
 src/include/miscadmin.h                       |   1 +
 src/include/postmaster/autovacuum.h           |   5 +
 src/include/utils/rel.h                       |   7 +
 11 files changed, 239 insertions(+), 10 deletions(-)

diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 9e288dfecbf..3cc29d4454a 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -222,6 +222,15 @@ static relopt_int intRelOpts[] =
 		},
 		SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
 	},
+	{
+		{
+			"autovacuum_parallel_workers",
+			"Maximum number of parallel autovacuum workers that can be used for processing this table.",
+			RELOPT_KIND_HEAP,
+			ShareUpdateExclusiveLock
+		},
+		-1, -1, 1024
+	},
 	{
 		{
 			"autovacuum_vacuum_threshold",
@@ -1881,6 +1890,8 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind)
 		{"fillfactor", RELOPT_TYPE_INT, offsetof(StdRdOptions, fillfactor)},
 		{"autovacuum_enabled", RELOPT_TYPE_BOOL,
 		offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, enabled)},
+		{"autovacuum_parallel_workers", RELOPT_TYPE_INT,
+		offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, autovacuum_parallel_workers)},
 		{"autovacuum_vacuum_threshold", RELOPT_TYPE_INT,
 		offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, vacuum_threshold)},
 		{"autovacuum_vacuum_max_threshold", RELOPT_TYPE_INT,
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 0feea1d30ec..acd53b85b1c 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -1,7 +1,9 @@
 /*-------------------------------------------------------------------------
  *
  * vacuumparallel.c
- *	  Support routines for parallel vacuum execution.
+ *	  Support routines for parallel vacuum and autovacuum execution. In the
+ *	  comments below, the word "vacuum" refers to both vacuum and
+ *	  autovacuum.
  *
  * This file contains routines that are intended to support setting up, using,
  * and tearing down a ParallelVacuumState.
@@ -34,6 +36,7 @@
 #include "executor/instrument.h"
 #include "optimizer/paths.h"
 #include "pgstat.h"
+#include "postmaster/autovacuum.h"
 #include "storage/bufmgr.h"
 #include "tcop/tcopprot.h"
 #include "utils/lsyscache.h"
@@ -373,8 +376,9 @@ parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,
 	shared->queryid = pgstat_get_my_query_id();
 	shared->maintenance_work_mem_worker =
 		(nindexes_mwm > 0) ?
-		maintenance_work_mem / Min(parallel_workers, nindexes_mwm) :
-		maintenance_work_mem;
+		vac_work_mem / Min(parallel_workers, nindexes_mwm) :
+		vac_work_mem;
+
 	shared->dead_items_info.max_bytes = vac_work_mem * (size_t) 1024;
 
 	/* Prepare DSA space for dead items */
@@ -553,12 +557,17 @@ parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested,
 	int			nindexes_parallel_bulkdel = 0;
 	int			nindexes_parallel_cleanup = 0;
 	int			parallel_workers;
+	int			max_workers;
+
+	max_workers = AmAutoVacuumWorkerProcess() ?
+		autovacuum_max_parallel_workers :
+		max_parallel_maintenance_workers;
 
 	/*
 	 * We don't allow performing parallel operation in standalone backend or
 	 * when parallelism is disabled.
 	 */
-	if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0)
+	if (!IsUnderPostmaster || max_workers == 0)
 		return 0;
 
 	/*
@@ -597,8 +606,8 @@ parallel_vacuum_compute_workers(Relation *indrels, int nindexes, int nrequested,
 	parallel_workers = (nrequested > 0) ?
 		Min(nrequested, nindexes_parallel) : nindexes_parallel;
 
-	/* Cap by max_parallel_maintenance_workers */
-	parallel_workers = Min(parallel_workers, max_parallel_maintenance_workers);
+	/* Cap by GUC variable */
+	parallel_workers = Min(parallel_workers, max_workers);
 
 	return parallel_workers;
 }
@@ -646,6 +655,13 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan
 	 */
 	nworkers = Min(nworkers, pvs->pcxt->nworkers);
 
+	/*
+	 * Reserve workers in the global autovacuum state. Note that we may be
+	 * given fewer workers than we requested.
+	 */
+	if (AmAutoVacuumWorkerProcess() && nworkers > 0)
+		nworkers = AutoVacuumReserveParallelWorkers(nworkers);
+
 	/*
 	 * Set index vacuum status and mark whether parallel vacuum worker can
 	 * process it.
@@ -690,6 +706,16 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan
 
 		LaunchParallelWorkers(pvs->pcxt);
 
+		if (AmAutoVacuumWorkerProcess() &&
+			pvs->pcxt->nworkers_launched < nworkers)
+		{
+			/*
+			 * Tell autovacuum that we could not launch all the previously
+			 * reserved workers.
+			 */
+			AutoVacuumReleaseParallelWorkers(nworkers - pvs->pcxt->nworkers_launched);
+		}
+
 		if (pvs->pcxt->nworkers_launched > 0)
 		{
 			/*
@@ -738,6 +764,10 @@ parallel_vacuum_process_all_indexes(ParallelVacuumState *pvs, int num_index_scan
 
 		for (int i = 0; i < pvs->pcxt->nworkers_launched; i++)
 			InstrAccumParallelQuery(&pvs->buffer_usage[i], &pvs->wal_usage[i]);
+
+		/* Also release all previously reserved parallel autovacuum workers */
+		if (AmAutoVacuumWorkerProcess() && pvs->pcxt->nworkers_launched > 0)
+			AutoVacuumReleaseParallelWorkers(pvs->pcxt->nworkers_launched);
 	}
 
 	/*
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 5084af7dfb6..9499d4f0c12 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -151,6 +151,12 @@ int			Log_autoanalyze_min_duration = 600000;
 static double av_storage_param_cost_delay = -1;
 static int	av_storage_param_cost_limit = -1;
 
+/*
+ * Number of parallel autovacuum workers currently reserved by this process.
+ * It is relevant only for a parallel autovacuum leader process.
+ */
+static int	av_nworkers_reserved = 0;
+
 /* Flags set by signal handlers */
 static volatile sig_atomic_t got_SIGUSR2 = false;
 
@@ -285,6 +291,8 @@ typedef struct AutoVacuumWorkItem
  * av_workItems		work item array
  * av_nworkersForBalance the number of autovacuum workers to use when
  * 					calculating the per worker cost limit
+ * av_freeParallelWorkers the number of free parallel autovacuum workers
+ * av_maxParallelWorkers the maximum number of parallel autovacuum workers
  *
  * This struct is protected by AutovacuumLock, except for av_signal and parts
  * of the worker list (see above).
@@ -299,6 +307,8 @@ typedef struct
 	WorkerInfo	av_startingWorker;
 	AutoVacuumWorkItem av_workItems[NUM_WORKITEMS];
 	pg_atomic_uint32 av_nworkersForBalance;
+	uint32		av_freeParallelWorkers;
+	uint32		av_maxParallelWorkers;
 } AutoVacuumShmemStruct;
 
 static AutoVacuumShmemStruct *AutoVacuumShmem;
@@ -364,6 +374,7 @@ static void autovac_report_workitem(AutoVacuumWorkItem *workitem,
 static void avl_sigusr2_handler(SIGNAL_ARGS);
 static bool av_worker_available(void);
 static void check_av_worker_gucs(void);
+static void adjust_free_parallel_workers(int prev_max_parallel_workers);
 
 
 
@@ -763,6 +774,8 @@ ProcessAutoVacLauncherInterrupts(void)
 	if (ConfigReloadPending)
 	{
 		int			autovacuum_max_workers_prev = autovacuum_max_workers;
+		int			autovacuum_max_parallel_workers_prev =
+			autovacuum_max_parallel_workers;
 
 		ConfigReloadPending = false;
 		ProcessConfigFile(PGC_SIGHUP);
@@ -779,6 +792,15 @@ ProcessAutoVacLauncherInterrupts(void)
 		if (autovacuum_max_workers_prev != autovacuum_max_workers)
 			check_av_worker_gucs();
 
+		/*
+		 * If autovacuum_max_parallel_workers changed, we must adjust the
+		 * number of available parallel autovacuum workers in shmem
+		 * accordingly.
+		 */
+		if (autovacuum_max_parallel_workers_prev !=
+			autovacuum_max_parallel_workers)
+			adjust_free_parallel_workers(autovacuum_max_parallel_workers_prev);
+
 		/* rebuild the list in case the naptime changed */
 		rebuild_database_list(InvalidOid);
 	}
@@ -1383,6 +1405,17 @@ avl_sigusr2_handler(SIGNAL_ARGS)
  *					  AUTOVACUUM WORKER CODE
  ********************************************************************/
 
+/*
+ * If a parallel autovacuum leader is exiting due to a FATAL error, make sure
+ * that all reserved workers are released.
+ */
+static void
+autovacuum_worker_before_shmem_exit(int code, Datum arg)
+{
+	if (code != 0)
+		AutoVacuumReleaseAllParallelWorkers();
+}
+
 /*
  * Main entry point for autovacuum worker processes.
  */
@@ -1429,6 +1462,8 @@ AutoVacWorkerMain(const void *startup_data, size_t startup_data_len)
 	pqsignal(SIGFPE, FloatExceptionHandler);
 	pqsignal(SIGCHLD, SIG_DFL);
 
+	before_shmem_exit(autovacuum_worker_before_shmem_exit, 0);
+
 	/*
 	 * Create a per-backend PGPROC struct in shared memory.  We must do this
 	 * before we can use LWLocks or access any shared memory.
@@ -2480,6 +2515,12 @@ do_autovacuum(void)
 		}
 		PG_CATCH();
 		{
+			/*
+			 * Parallel autovacuum may have reserved parallel workers. Make
+			 * sure that all of them are released.
+			 */
+			AutoVacuumReleaseAllParallelWorkers();
+
 			/*
 			 * Abort the transaction, start a new one, and proceed with the
 			 * next table in our list.
@@ -2877,8 +2918,12 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
 		 */
 		tab->at_params.index_cleanup = VACOPTVALUE_UNSPECIFIED;
 		tab->at_params.truncate = VACOPTVALUE_UNSPECIFIED;
-		/* As of now, we don't support parallel vacuum for autovacuum */
-		tab->at_params.nworkers = -1;
+
+		/* Decide whether the table's indexes need to be processed in parallel. */
+		tab->at_params.nworkers = avopts
+			? avopts->autovacuum_parallel_workers
+			: -1;
+
 		tab->at_params.freeze_min_age = freeze_min_age;
 		tab->at_params.freeze_table_age = freeze_table_age;
 		tab->at_params.multixact_freeze_min_age = multixact_freeze_min_age;
@@ -3360,6 +3405,85 @@ AutoVacuumRequestWork(AutoVacuumWorkItemType type, Oid relationId,
 	return result;
 }
 
+/*
+ * In order to respect the 'autovacuum_max_parallel_workers' limit, a leader
+ * autovacuum process must call this function. It reserves workers (if any)
+ * in the global autovacuum state and returns the number of parallel workers
+ * that can actually be launched.
+ *
+ * NOTE: We try to provide as many workers as requested, even if the caller
+ * ends up occupying all available workers.
+ */
+int
+AutoVacuumReserveParallelWorkers(int nworkers)
+{
+	int			nreserved;
+
+	/* Only leader worker can call this function. */
+	Assert(AmAutoVacuumWorkerProcess() && !IsParallelWorker());
+
+	/*
+	 * We can only reserve workers at the beginning of parallel index
+	 * processing, so we must not have any reserved workers right now.
+	 */
+	Assert(av_nworkers_reserved == 0);
+
+	LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
+
+	/* Provide as many workers as we can. */
+	nreserved = Min(AutoVacuumShmem->av_freeParallelWorkers, nworkers);
+	AutoVacuumShmem->av_freeParallelWorkers -= nreserved;
+
+	/* Remember how many workers we have actually reserved. */
+	av_nworkers_reserved += nreserved;
+
+	LWLockRelease(AutovacuumLock);
+	return nreserved;
+}
+
+/*
+ * A leader autovacuum process must call this function in order to update the
+ * global autovacuum state, so that other leaders will be able to use the
+ * released parallel workers.
+ *
+ * 'nworkers' - how many workers the caller wants to release.
+ */
+void
+AutoVacuumReleaseParallelWorkers(int nworkers)
+{
+	/* Only leader worker can call this function. */
+	Assert(AmAutoVacuumWorkerProcess() && !IsParallelWorker());
+
+	LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
+
+	/*
+	 * If the maximum number of parallel workers was reduced during execution,
+	 * we must cap the number of available workers at its new value.
+	 */
+	AutoVacuumShmem->av_freeParallelWorkers =
+		Min(AutoVacuumShmem->av_freeParallelWorkers + nworkers,
+			AutoVacuumShmem->av_maxParallelWorkers);
+
+	/* We no longer need to track these workers. */
+	av_nworkers_reserved -= nworkers;
+
+	LWLockRelease(AutovacuumLock);
+}
+
+/*
+ * Same as above, but release *all* parallel workers that were reserved by
+ * the current leader autovacuum process.
+ */
+void
+AutoVacuumReleaseAllParallelWorkers(void)
+{
+	/* Only leader worker can call this function. */
+	Assert(AmAutoVacuumWorkerProcess() && !IsParallelWorker());
+
+	if (av_nworkers_reserved > 0)
+		AutoVacuumReleaseParallelWorkers(av_nworkers_reserved);
+}
+
 /*
  * autovac_init
  *		This is called at postmaster initialization.
@@ -3420,6 +3544,10 @@ AutoVacuumShmemInit(void)
 		Assert(!found);
 
 		AutoVacuumShmem->av_launcherpid = 0;
+		AutoVacuumShmem->av_maxParallelWorkers =
+			Min(autovacuum_max_parallel_workers, max_worker_processes);
+		AutoVacuumShmem->av_freeParallelWorkers =
+			AutoVacuumShmem->av_maxParallelWorkers;
 		dclist_init(&AutoVacuumShmem->av_freeWorkers);
 		dlist_init(&AutoVacuumShmem->av_runningWorkers);
 		AutoVacuumShmem->av_startingWorker = NULL;
@@ -3501,3 +3629,34 @@ check_av_worker_gucs(void)
 				 errdetail("The server will only start up to \"autovacuum_worker_slots\" (%d) autovacuum workers at a given time.",
 						   autovacuum_worker_slots)));
 }
+
+/*
+ * Make sure that the number of free parallel workers corresponds to the
+ * autovacuum_max_parallel_workers parameter (after it was changed).
+ */
+static void
+adjust_free_parallel_workers(int prev_max_parallel_workers)
+{
+	LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
+
+	/*
+	 * Cap the number of free workers at the new parameter's value, if needed.
+	 */
+	AutoVacuumShmem->av_freeParallelWorkers =
+		Min(AutoVacuumShmem->av_freeParallelWorkers,
+			autovacuum_max_parallel_workers);
+
+	if (autovacuum_max_parallel_workers > prev_max_parallel_workers)
+	{
+		/*
+		 * If the user has increased the number of parallel autovacuum
+		 * workers, grow the number of free workers by the same amount.
+		 */
+		AutoVacuumShmem->av_freeParallelWorkers +=
+			(autovacuum_max_parallel_workers - prev_max_parallel_workers);
+	}
+
+	AutoVacuumShmem->av_maxParallelWorkers = autovacuum_max_parallel_workers;
+
+	LWLockRelease(AutovacuumLock);
+}
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index d31cb45a058..fd00d6f89dc 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -143,6 +143,7 @@ int			NBuffers = 16384;
 int			MaxConnections = 100;
 int			max_worker_processes = 8;
 int			max_parallel_workers = 8;
+int			autovacuum_max_parallel_workers = 0;
 int			MaxBackends = 0;
 
 /* GUC parameters for vacuum */
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index a82286cc98a..e7c5982da2a 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3387,9 +3387,13 @@ set_config_with_handle(const char *name, config_handle *handle,
 	 *
 	 * Also allow normal setting if the GUC is marked GUC_ALLOW_IN_PARALLEL.
 	 *
-	 * Other changes might need to affect other workers, so forbid them.
+	 * Other changes might need to affect other workers, so forbid them. Note
+	 * that the parallel autovacuum leader is an exception, because only the
+	 * cost-based delay settings need to propagate to parallel vacuum
+	 * workers, and we handle that elsewhere if appropriate.
 	 */
-	if (IsInParallelMode() && changeVal && action != GUC_ACTION_SAVE &&
+	if (IsInParallelMode() && !AmAutoVacuumWorkerProcess() && changeVal &&
+		action != GUC_ACTION_SAVE &&
 		(record->flags & GUC_ALLOW_IN_PARALLEL) == 0)
 	{
 		ereport(elevel,
diff --git a/src/backend/utils/misc/guc_parameters.dat b/src/backend/utils/misc/guc_parameters.dat
index d6fc8333850..5fbda66b3d4 100644
--- a/src/backend/utils/misc/guc_parameters.dat
+++ b/src/backend/utils/misc/guc_parameters.dat
@@ -2129,6 +2129,15 @@
   max => 'MAX_BACKENDS',
 },
 
+{ name => 'autovacuum_max_parallel_workers', type => 'int', context => 'PGC_SIGHUP', group => 'VACUUM_AUTOVACUUM',
  short_desc => 'Maximum number of parallel autovacuum workers that can be taken from the bgworkers pool.',
+  long_desc => 'This parameter is capped by "max_worker_processes" (not by "autovacuum_max_workers"!).',
+  variable => 'autovacuum_max_parallel_workers',
+  boot_val => '0',
+  min => '0',
+  max => 'MAX_BACKENDS',
+},
+
 { name => 'max_parallel_maintenance_workers', type => 'int', context => 'PGC_USERSET', group => 'RESOURCES_WORKER_PROCESSES',
   short_desc => 'Sets the maximum number of parallel processes per maintenance operation.',
   variable => 'max_parallel_maintenance_workers',
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index f62b61967ef..b3e471ed33e 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -691,6 +691,7 @@
 autovacuum_worker_slots = 16	# autovacuum worker slots to allocate
 					# (change requires restart)
 #autovacuum_max_workers = 3		# max number of autovacuum subprocesses
+#autovacuum_max_parallel_workers = 0	# disabled by default and limited by max_worker_processes
 #autovacuum_naptime = 1min		# time between autovacuum runs
 #autovacuum_vacuum_threshold = 50	# min number of row updates before
 					# vacuum
diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c
index 36ea6a4d557..d89da606920 100644
--- a/src/bin/psql/tab-complete.in.c
+++ b/src/bin/psql/tab-complete.in.c
@@ -1412,6 +1412,7 @@ static const char *const table_storage_parameters[] = {
 	"autovacuum_multixact_freeze_max_age",
 	"autovacuum_multixact_freeze_min_age",
 	"autovacuum_multixact_freeze_table_age",
+	"autovacuum_parallel_workers",
 	"autovacuum_vacuum_cost_delay",
 	"autovacuum_vacuum_cost_limit",
 	"autovacuum_vacuum_insert_scale_factor",
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 1bef98471c3..85926415657 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -177,6 +177,7 @@ extern PGDLLIMPORT int MaxBackends;
 extern PGDLLIMPORT int MaxConnections;
 extern PGDLLIMPORT int max_worker_processes;
 extern PGDLLIMPORT int max_parallel_workers;
+extern PGDLLIMPORT int autovacuum_max_parallel_workers;
 
 extern PGDLLIMPORT int commit_timestamp_buffers;
 extern PGDLLIMPORT int multixact_member_buffers;
diff --git a/src/include/postmaster/autovacuum.h b/src/include/postmaster/autovacuum.h
index 023ac6d5fa8..f4b93b44531 100644
--- a/src/include/postmaster/autovacuum.h
+++ b/src/include/postmaster/autovacuum.h
@@ -65,6 +65,11 @@ pg_noreturn extern void AutoVacWorkerMain(const void *startup_data, size_t start
 extern bool AutoVacuumRequestWork(AutoVacuumWorkItemType type,
 								  Oid relationId, BlockNumber blkno);
 
+/* parallel autovacuum stuff */
+extern int	AutoVacuumReserveParallelWorkers(int nworkers);
+extern void AutoVacuumReleaseParallelWorkers(int nworkers);
+extern void AutoVacuumReleaseAllParallelWorkers(void);
+
 /* shared memory stuff */
 extern Size AutoVacuumShmemSize(void);
 extern void AutoVacuumShmemInit(void);
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index 80286076a11..e879fdcfc69 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -311,6 +311,13 @@ typedef struct ForeignKeyCacheInfo
 typedef struct AutoVacOpts
 {
 	bool		enabled;
+
+	/*
+	 * Max number of parallel autovacuum workers. If the value is 0, the
+	 * parallel degree is computed based on the number of indexes.
+	 */
+	int			autovacuum_parallel_workers;
+
 	int			vacuum_threshold;
 	int			vacuum_max_threshold;
 	int			vacuum_ins_threshold;
-- 
2.43.0
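
To illustrate the reservation accounting in the second patch (numbers are
illustrative): with autovacuum_max_parallel_workers = 4 and all 4 slots free,
a leader that computes nworkers = 6 reserves min(4, 6) = 4, leaving 0 free.
If only 3 workers are actually launched, 1 is released right away, and the
remaining 3 are released once index processing finishes, restoring the pool
to 4 (capped by av_maxParallelWorkers in case the GUC was lowered meanwhile).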
