Rebased on top of 007491979461ff10d487e1da9bcc87f2fd834f26

Also, I was thinking that lazy_scan_heap doesn't need to do this:

+           /* Pop the error context stack while calling vacuum */
+           error_context_stack = errcallback.previous;
...
+           /* Set the error context while continuing heap scan */
+           error_context_stack = &errcallback;

It seems to me that's not actually necessary, since lazy_vacuum_heap will just
*push* its own context handler onto the stack and then pop it back off; we don't
need to pop ours beforehand.  We also vacuum the FSM, and one might object that
we shouldn't report "...while scanning block number..." while we're actually
vacuuming the FSM rather than scanning the heap.  To that I would reply that
either vacuuming the FSM can reasonably be considered part of scanning the heap,
or we should add an additional callback for it.  The latter isn't very nice,
though, since we'd need to add a PROGRESS enum value for which we don't actually
report progress (or stop using that enum for the callback phases).
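
For reference, here is a minimal sketch (not part of the patch; the function
names are made up) of why that works: a callee that installs its own
ErrorContextCallback links the caller's frame through ->previous and restores it
on return, and error reporting walks the stack from the innermost frame outward,
so leaving our frame installed simply means both context lines would be printed.

#include "postgres.h"
#include "storage/block.h"		/* BlockNumber */

static BlockNumber cur_blkno;

static void
scan_cb(void *arg)
{
	errcontext("while scanning block %u", *(BlockNumber *) arg);
}

static void
do_nested_work(void)
{
	ErrorContextCallback cb;
	BlockNumber blkno = 0;

	/* Push our own frame; the caller's frame stays linked underneath. */
	cb.callback = scan_cb;
	cb.arg = &blkno;
	cb.previous = error_context_stack;
	error_context_stack = &cb;

	/* Any ereport(ERROR, ...) here runs scan_cb, then the caller's callback. */

	/* Pop only our own frame; the caller's frame is current again. */
	error_context_stack = cb.previous;
}

static void
do_outer_work(void)
{
	ErrorContextCallback cb;

	cb.callback = scan_cb;
	cb.arg = &cur_blkno;
	cb.previous = error_context_stack;
	error_context_stack = &cb;

	/* No pop needed around this call; do_nested_work() restores the stack. */
	do_nested_work();

	error_context_stack = cb.previous;
}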

I tested using variations on this, and it works as expected: the context is
correct both during the vacuum pass invoked in the middle of the scan and after
it, once the heap scan resumes:

template1=# SET statement_timeout=0; SET maintenance_work_mem='1MB';
template1=# DROP TABLE tt; CREATE UNLOGGED TABLE tt(i int);
template1=# INSERT INTO tt SELECT generate_series(1,399999);
template1=# CREATE INDEX ON tt(i);
template1=# UPDATE tt SET i=i-1;
template1=# SET statement_timeout=1222; VACUUM VERBOSE tt;
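
With those settings the timeout fires somewhere inside the VACUUM, and the
cancellation then carries a context line built from the patch's format strings,
roughly like this (the block number and phase depend on where the timeout
happens to hit):

ERROR:  canceling statement due to statement timeout
CONTEXT:  while scanning block 1234 of relation "public.tt"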

From 91158171f75cd20e69b18843dd3b6525961e4e8b Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Thu, 12 Dec 2019 20:54:37 -0600
Subject: [PATCH v21 1/2] vacuum errcontext to show block being processed

Discussion:
https://www.postgresql.org/message-id/20191120210600.gc30...@telsasoft.com
---
 src/backend/access/heap/vacuumlazy.c | 130 ++++++++++++++++++++++++++++++++++-
 1 file changed, 129 insertions(+), 1 deletion(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 03c43ef..9e69294 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -292,6 +292,14 @@ typedef struct LVRelStats
 	bool		lock_waiter_detected;
 } LVRelStats;
 
+typedef struct
+{
+	char 		*relnamespace;
+	char		*relname;
+	char 		*indname;
+	BlockNumber blkno;	/* used only for heap operations */
+	int			phase;	/* Reusing same enums as for progress reporting */
+} vacuum_error_callback_arg;
 
 /* A few variables that don't seem worth passing around as parameters */
 static int	elevel = -1;
@@ -361,6 +369,9 @@ static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats,
 								LVParallelState *lps, int nindexes);
 static LVSharedIndStats *get_indstats(LVShared *lvshared, int n);
 static bool skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared);
+static void vacuum_error_callback(void *arg);
+static void init_vacuum_error_callback(ErrorContextCallback *errcallback,
+		vacuum_error_callback_arg *errcbarg, Relation onerel, int phase);
 
 
 /*
@@ -724,6 +735,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		PROGRESS_VACUUM_MAX_DEAD_TUPLES
 	};
 	int64		initprog_val[3];
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	pg_rusage_init(&ru0);
 
@@ -870,6 +883,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	else
 		skipping_blocks = false;
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, onerel,
+					PROGRESS_VACUUM_PHASE_SCAN_HEAP);
+	error_context_stack = &errcallback;
+
 	for (blkno = 0; blkno < nblocks; blkno++)
 	{
 		Buffer		buf;
@@ -891,6 +909,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 #define FORCE_CHECK_PAGE() \
 		(blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
 
+		errcbarg.blkno = blkno;
+
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
 		if (blkno == next_unskippable_block)
@@ -987,6 +1007,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				vmbuffer = InvalidBuffer;
 			}
 
+			/* Pop the error context stack while calling vacuum */
+			error_context_stack = errcallback.previous;
+
 			/* Work on all the indexes, then the heap */
 			lazy_vacuum_all_indexes(onerel, Irel, indstats,
 									vacrelstats, lps, nindexes);
@@ -1011,6 +1034,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			/* Report that we are once again scanning the heap */
 			pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 										 PROGRESS_VACUUM_PHASE_SCAN_HEAP);
+
+			/* Set the error context while continuing heap scan */
+			error_context_stack = &errcallback;
 		}
 
 		/*
@@ -1597,6 +1623,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			RecordPageWithFreeSpace(onerel, blkno, freespace);
 	}
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	/* report that everything is scanned and vacuumed */
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
@@ -1772,14 +1801,21 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 	int			npages;
 	PGRUsage	ru0;
 	Buffer		vmbuffer = InvalidBuffer;
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	/* Report that we are now vacuuming the heap */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 								 PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
 
 	pg_rusage_init(&ru0);
-	npages = 0;
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, onerel,
+			PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
+	error_context_stack = &errcallback;
+
+	npages = 0;
 	tupindex = 0;
 	while (tupindex < vacrelstats->dead_tuples->num_tuples)
 	{
@@ -1791,6 +1827,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		vacuum_delay_point();
 
 		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
+		errcbarg.blkno = tblk;
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
@@ -1811,6 +1848,9 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 		npages++;
 	}
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	if (BufferIsValid(vmbuffer))
 	{
 		ReleaseBuffer(vmbuffer);
@@ -2318,6 +2358,8 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	IndexVacuumInfo ivinfo;
 	const char *msg;
 	PGRUsage	ru0;
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	pg_rusage_init(&ru0);
 
@@ -2329,10 +2371,18 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, indrel,
+			PROGRESS_VACUUM_PHASE_VACUUM_INDEX);
+	error_context_stack = &errcallback;
+
 	/* Do bulk deletion */
 	*stats = index_bulk_delete(&ivinfo, *stats,
 							   lazy_tid_reaped, (void *) dead_tuples);
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	if (IsParallelWorker())
 		msg = gettext_noop("scanned index \"%s\" to remove %d row versions by parallel vacuum worker");
 	else
@@ -2359,6 +2409,8 @@ lazy_cleanup_index(Relation indrel,
 	IndexVacuumInfo ivinfo;
 	const char *msg;
 	PGRUsage	ru0;
+	vacuum_error_callback_arg errcbarg;
+	ErrorContextCallback errcallback;
 
 	pg_rusage_init(&ru0);
 
@@ -2371,8 +2423,16 @@ lazy_cleanup_index(Relation indrel,
 	ivinfo.num_heap_tuples = reltuples;
 	ivinfo.strategy = vac_strategy;
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, indrel,
+			PROGRESS_VACUUM_PHASE_INDEX_CLEANUP);
+	error_context_stack = &errcallback;
+
 	*stats = index_vacuum_cleanup(&ivinfo, *stats);
 
+	/* Pop the error context stack */
+	error_context_stack = errcallback.previous;
+
 	if (!(*stats))
 		return;
 
@@ -3376,3 +3436,71 @@ parallel_vacuum_main(dsm_segment *seg, shm_toc *toc)
 	table_close(onerel, ShareUpdateExclusiveLock);
 	pfree(stats);
 }
+
+/*
+ * Error context callback for errors occurring during vacuum.
+ */
+static void
+vacuum_error_callback(void *arg)
+{
+	vacuum_error_callback_arg *cbarg = arg;
+
+	switch (cbarg->phase) {
+		case PROGRESS_VACUUM_PHASE_SCAN_HEAP:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext(_("while scanning block %u of relation \"%s.%s\""),
+						cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext(_("while vacuuming block %u of relation \"%s.%s\""),
+						cbarg->blkno, cbarg->relnamespace, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_INDEX:
+			errcontext(_("while vacuuming index \"%s.%s\" of relation \"%s\""),
+					cbarg->relnamespace, cbarg->indname, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_INDEX_CLEANUP:
+			errcontext(_("while cleaning up index \"%s.%s\" of relation \"%s\""),
+					cbarg->relnamespace, cbarg->indname, cbarg->relname);
+			break;
+
+		case PROGRESS_VACUUM_PHASE_TRUNCATE:
+		case PROGRESS_VACUUM_PHASE_FINAL_CLEANUP:
+		default:
+			return; /* Shouldn't happen: do nothing */
+	}
+}
+
+/* Initialize vacuum error callback */
+static void
+init_vacuum_error_callback(ErrorContextCallback *errcallback,
+				vacuum_error_callback_arg *errcbarg, Relation rel, int phase)
+{
+	switch (phase)
+	{
+		case PROGRESS_VACUUM_PHASE_SCAN_HEAP:
+		case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
+			errcbarg->relname = RelationGetRelationName(rel);
+			errcbarg->indname = NULL; /* Not used for heap */
+			break;
+
+		case PROGRESS_VACUUM_PHASE_VACUUM_INDEX:
+		case PROGRESS_VACUUM_PHASE_INDEX_CLEANUP:
+			/* rel is an index relation in index cases */
+			errcbarg->relname = get_rel_name(rel->rd_index->indrelid);
+			errcbarg->indname = RelationGetRelationName(rel);
+			break;
+	}
+
+	errcbarg->relnamespace = get_namespace_name(RelationGetNamespace(rel));
+	errcbarg->blkno = InvalidBlockNumber; /* Not known yet */
+	errcbarg->phase = phase;
+
+	errcallback->callback = vacuum_error_callback;
+	errcallback->arg = errcbarg;
+	errcallback->previous = error_context_stack;
+}
-- 
2.7.4

From f28b919fa543d94cda1f9c0c920e564bca07905a Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 16 Feb 2020 20:25:13 -0600
Subject: [PATCH v21 2/2] add callback for truncation

---
 src/backend/access/heap/vacuumlazy.c | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 9e69294..ce0a515 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2500,11 +2500,17 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 	BlockNumber old_rel_pages = vacrelstats->rel_pages;
 	BlockNumber new_rel_pages;
 	int			lock_retry;
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
 
 	/* Report that we are now truncating */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
 								 PROGRESS_VACUUM_PHASE_TRUNCATE);
 
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, onerel,
+			PROGRESS_VACUUM_PHASE_TRUNCATE);
+
 	/*
 	 * Loop until no more truncating can be done.
 	 */
@@ -2585,11 +2591,18 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 			return;
 		}
 
+		/* Setup error traceback support for ereport() */
+		errcbarg.blkno = new_rel_pages;
+		error_context_stack = &errcallback;
+
 		/*
 		 * Okay to truncate.
 		 */
 		RelationTruncate(onerel, new_rel_pages);
 
+		/* Pop the error context stack */
+		error_context_stack = errcallback.previous;
+
 		/*
 		 * We can release the exclusive lock as soon as we have truncated.
 		 * Other backends can't safely access the relation until they have
@@ -2629,6 +2642,12 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 	BlockNumber blkno;
 	BlockNumber prefetchedUntil;
 	instr_time	starttime;
+	ErrorContextCallback errcallback;
+	vacuum_error_callback_arg errcbarg;
+
+	/* Setup error traceback support for ereport() */
+	init_vacuum_error_callback(&errcallback, &errcbarg, onerel,
+			PROGRESS_VACUUM_PHASE_TRUNCATE);
 
 	/* Initialize the starttime if we check for conflicting lock requests */
 	INSTR_TIME_SET_CURRENT(starttime);
@@ -2692,6 +2711,10 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 
 		blkno--;
 
+		/* Setup error traceback support for ereport() */
+		errcbarg.blkno = blkno;
+		error_context_stack = &errcallback;
+
 		/* If we haven't prefetched this lot yet, do so now. */
 		if (prefetchedUntil > blkno)
 		{
@@ -2710,6 +2733,9 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno,
 								 RBM_NORMAL, vac_strategy);
 
+		/* Pop the error context stack */
+		error_context_stack = errcallback.previous;
+
 		/* In this phase we only need shared access to the buffer */
 		LockBuffer(buf, BUFFER_LOCK_SHARE);
 
@@ -3469,6 +3495,11 @@ vacuum_error_callback(void *arg)
 			break;
 
 		case PROGRESS_VACUUM_PHASE_TRUNCATE:
+			if (BlockNumberIsValid(cbarg->blkno))
+				errcontext(_("while truncating relation \"%s.%s\" to %u blocks"),
+						cbarg->relnamespace, cbarg->relname, cbarg->blkno);
+			break;
+
 		case PROGRESS_VACUUM_PHASE_FINAL_CLEANUP:
 		default:
 			return; /* Shouldn't happen: do nothing */
@@ -3484,6 +3515,7 @@ init_vacuum_error_callback(ErrorContextCallback *errcallback,
 	{
 		case PROGRESS_VACUUM_PHASE_SCAN_HEAP:
 		case PROGRESS_VACUUM_PHASE_VACUUM_HEAP:
+		case PROGRESS_VACUUM_PHASE_TRUNCATE:
 			errcbarg->relname = RelationGetRelationName(rel);
 			errcbarg->indname = NULL; /* Not used for heap */
 			break;
-- 
2.7.4
