From 5bc904406b619e1020a1e56d2b77b25d787ae34e Mon Sep 17 00:00:00 2001
From: Amit Langote <amitlan@postgresql.org>
Date: Mon, 16 Mar 2026 20:57:35 +0900
Subject: [PATCH v6 2/3] Cache per-batch resources for fast-path foreign key
 checks

The fast-path FK check introduced in the previous commit opens and
closes the PK relation, index, scan descriptor, and tuple slot on
every trigger invocation.  For bulk operations that fire thousands of
FK triggers in a single statement, this repeated setup/teardown
dominates the cost.

Introduce RI_FastPathEntry, a per-constraint hash table that caches
the open Relation (pk_rel, idx_rel), IndexScanDesc, TupleTableSlot,
and a registered Snapshot across all trigger invocations within a
single trigger-firing batch.  Entries are created lazily on first use
via ri_FastPathGetEntry() and persist until the batch ends.

The snapshot is registered once at entry creation time, and its
curcid is patched in place on each subsequent row rather than
taking a fresh snapshot per invocation.  This avoids the per-row
GetSnapshotData() cost.  Under REPEATABLE READ the transaction
snapshot is immutable so caching is a no-op.  Under READ COMMITTED
the cached snapshot will not reflect PK rows committed by other
backends mid-batch, but this is acceptable: the FK check only needs
PK rows visible before the statement began plus effects of earlier
triggers (tracked by curcid); concurrent commits would not be
reliably visible even with per-row snapshots, since trigger firing
order is nondeterministic; and LockTupleKeyShare prevents the PK
row from disappearing regardless.

SnapshotSetCommandId() only patches the process-global statics, not
registered copies, so we patch entry->snapshot->curcid directly.

Permission checks (schema USAGE + table SELECT) are performed once at
cache entry creation rather than per flush.  The RI check runs as the
PK table owner (via SetUserIdAndSecContext), so in practice these
checks verify that the owner has access to their own table -- a
condition that holds unless privileges have been explicitly revoked
from the owner, which would equally break the SPI path.  Checking
once per batch avoids repeated syscache lookups from
pg_class_aclcheck() with no user-visible behavior change.

Lifecycle management:

  - AfterTriggerBatchCallback: A new general-purpose callback
    mechanism in trigger.c.  Callbacks registered via
    RegisterAfterTriggerBatchCallback() fire at the end of each
    trigger-firing batch (AfterTriggerEndQuery for immediate
    constraints, AfterTriggerFireDeferred at COMMIT, and
    AfterTriggerSetState for SET CONSTRAINTS IMMEDIATE).  The RI code
    registers ri_FastPathTeardown as a batch callback, which does
    orderly teardown: index_endscan, index_close, table_close,
    ExecDropSingleTupleTableSlot, UnregisterSnapshot.

  - XactCallback: ri_FastPathXactCallback NULLs the static cache
    pointer at transaction end.  On the normal path, cleanup already
    ran via the batch callback; this handles the abort path where
    TopTransactionContext destruction frees the memory but
    ResourceOwner handles the actual resource cleanup.

  - SubXactCallback: ri_FastPathSubXactCallback NULLs the static
    cache pointer on subtransaction abort.  ResourceOwner already
    cleaned up the resources; this prevents the batch callback from
    trying to double-close them.

  - AfterTriggerBatchIsActive(): Exported accessor that returns true
    when afterTriggers.query_depth >= 0.  During ALTER TABLE ... ADD
    FOREIGN KEY validation, RI triggers are called directly outside
    the after-trigger framework, so batch callbacks would never fire.
    The fast-path code uses this to fall back to a non-cached
    per-invocation path (open/scan/close each call) in that context.
---
 src/backend/commands/trigger.c            |  90 +++++++
 src/backend/utils/adt/ri_triggers.c       | 275 +++++++++++++++++++++-
 src/include/commands/trigger.h            |  18 ++
 src/test/regress/expected/foreign_key.out |  86 +++++++
 src/test/regress/sql/foreign_key.sql      |  80 +++++++
 src/tools/pgindent/typedefs.list          |   3 +
 6 files changed, 549 insertions(+), 3 deletions(-)

diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 98d402c0a3b..57d33f76a8c 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -3891,6 +3891,8 @@ typedef struct AfterTriggersData
 	/* per-subtransaction-level data: */
 	AfterTriggersTransData *trans_stack;	/* array of structs shown below */
 	int			maxtransdepth;	/* allocated len of above array */
+
+	List	   *batch_callbacks;	/* List of AfterTriggerCallbackItem */
 } AfterTriggersData;
 
 struct AfterTriggersQueryData
@@ -3927,6 +3929,13 @@ struct AfterTriggersTableData
 	TupleTableSlot *storeslot;	/* for converting to tuplestore's format */
 };
 
+/* Entry in afterTriggers.batch_callbacks */
+typedef struct AfterTriggerCallbackItem
+{
+	AfterTriggerBatchCallback callback;
+	void	   *arg;
+} AfterTriggerCallbackItem;
+
 static AfterTriggersData afterTriggers;
 
 static void AfterTriggerExecute(EState *estate,
@@ -3962,6 +3971,7 @@ static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
 													Oid tgoid, bool tgisdeferred);
 static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
 
+static void FireAfterTriggerBatchCallbacks(void);
 
 /*
  * Get the FDW tuplestore for the current trigger query level, creating it
@@ -5087,6 +5097,7 @@ AfterTriggerBeginXact(void)
 	 */
 	afterTriggers.firing_counter = (CommandId) 1;	/* mustn't be 0 */
 	afterTriggers.query_depth = -1;
+	afterTriggers.batch_callbacks = NIL;
 
 	/*
 	 * Verify that there is no leftover state remaining.  If these assertions
@@ -5208,6 +5219,8 @@ AfterTriggerEndQuery(EState *estate)
 			break;
 	}
 
+	FireAfterTriggerBatchCallbacks();
+
 	/* Release query-level-local storage, including tuplestores if any */
 	AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
 
@@ -5315,6 +5328,8 @@ AfterTriggerFireDeferred(void)
 			break;				/* all fired */
 	}
 
+	FireAfterTriggerBatchCallbacks();
+
 	/*
 	 * We don't bother freeing the event list, since it will go away anyway
 	 * (and more efficiently than via pfree) in AfterTriggerEndXact.
@@ -6057,6 +6072,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
 				break;			/* all fired */
 		}
 
+		FireAfterTriggerBatchCallbacks();
+
 		if (snapshot_set)
 			PopActiveSnapshot();
 	}
@@ -6753,3 +6770,76 @@ check_modified_virtual_generated(TupleDesc tupdesc, HeapTuple tuple)
 
 	return tuple;
 }
+
+/*
+ * RegisterAfterTriggerBatchCallback
+ *		Register a function to be called when the current trigger-firing
+ *		batch completes.
+ *
+ * Must be called from within a trigger function's execution context
+ * (i.e., while afterTriggers state is active).
+ *
+ * The callback list is cleared after invocation, so the caller must
+ * re-register for each new batch if needed.
+ */
+void
+RegisterAfterTriggerBatchCallback(AfterTriggerBatchCallback callback,
+								  void *arg)
+{
+	AfterTriggerCallbackItem *item;
+	MemoryContext oldcxt;
+
+	/*
+	 * Allocate in TopTransactionContext so the item survives for the duration
+	 * of the batch, which may span multiple trigger invocations.
+	 */
+	oldcxt = MemoryContextSwitchTo(TopTransactionContext);
+	item = palloc(sizeof(AfterTriggerCallbackItem));
+	item->callback = callback;
+	item->arg = arg;
+	afterTriggers.batch_callbacks =
+		lappend(afterTriggers.batch_callbacks, item);
+	MemoryContextSwitchTo(oldcxt);
+}
+
+/*
+ * FireAfterTriggerBatchCallbacks
+ *		Invoke and clear all registered batch callbacks.
+ *
+ * Only fires at the outermost query level (query_depth == 0) or from
+ * top-level operations (query_depth == -1, e.g. AfterTriggerFireDeferred
+ * at COMMIT).  Nested queries from SPI inside AFTER triggers run at
+ * depth > 0 and must not tear down resources the outer batch still needs.
+ */
+static void
+FireAfterTriggerBatchCallbacks(void)
+{
+	ListCell   *lc;
+
+	if (afterTriggers.query_depth > 0)
+		return;
+
+	foreach(lc, afterTriggers.batch_callbacks)
+	{
+		AfterTriggerCallbackItem *item = lfirst(lc);
+
+		item->callback(item->arg);
+	}
+
+	list_free_deep(afterTriggers.batch_callbacks);
+	afterTriggers.batch_callbacks = NIL;
+}
+
+/*
+ * AfterTriggerBatchIsActive
+ *		Returns true if we're inside a query-level trigger batch where
+ *		registered batch callbacks will actually be invoked.
+ *
+ * This is false during validateForeignKeyConstraint(), which calls
+ * RI trigger functions directly outside the after-trigger framework.
+ */
+bool
+AfterTriggerBatchIsActive(void)
+{
+	return afterTriggers.query_depth >= 0;
+}
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 2357735c4c8..467418cadc0 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -196,6 +196,23 @@ typedef struct RI_CompareHashEntry
 	FmgrInfo	cast_func_finfo;	/* in case we must coerce input */
 } RI_CompareHashEntry;
 
+/*
+ * RI_FastPathEntry
+ *		Per-constraint cache of resources needed by ri_FastPathCheckCached().
+ *
+ * One entry per constraint, keyed by pg_constraint OID.  Created lazily
+ * by ri_FastPathGetEntry() on first use within a trigger-firing batch
+ * and torn down by ri_FastPathTeardown() at batch end.
+ */
+typedef struct RI_FastPathEntry
+{
+	Oid			conoid;			/* hash key: pg_constraint OID */
+	Relation	pk_rel;
+	Relation	idx_rel;
+	IndexScanDesc scandesc;
+	TupleTableSlot *slot;
+	Snapshot	snapshot;		/* registered snapshot for the scan */
+} RI_FastPathEntry;
 
 /*
  * Local data
@@ -205,6 +222,8 @@ static HTAB *ri_query_cache = NULL;
 static HTAB *ri_compare_cache = NULL;
 static dclist_head ri_constraint_cache_valid_list;
 
+static HTAB *ri_fastpath_cache = NULL;
+static bool ri_fastpath_callback_registered = false;
 
 /*
  * Local function prototypes
@@ -255,6 +274,8 @@ static bool ri_PerformCheck(const RI_ConstraintInfo *riinfo,
 							bool detectNewRows, int expect_OK);
 static void ri_FastPathCheck(const RI_ConstraintInfo *riinfo,
 							 Relation fk_rel, TupleTableSlot *newslot);
+static void ri_FastPathCheckCached(const RI_ConstraintInfo *riinfo,
+								   Relation fk_rel, TupleTableSlot *newslot);
 static bool ri_FastPathProbeOne(Relation pk_rel, Relation idx_rel,
 								IndexScanDesc scandesc, TupleTableSlot *slot,
 								Snapshot snapshot, const RI_ConstraintInfo *riinfo,
@@ -277,6 +298,9 @@ pg_noreturn static void ri_ReportViolation(const RI_ConstraintInfo *riinfo,
 										   Relation pk_rel, Relation fk_rel,
 										   TupleTableSlot *violatorslot, TupleDesc tupdesc,
 										   int queryno, bool is_restrict, bool partgone);
+static RI_FastPathEntry *ri_FastPathGetEntry(const RI_ConstraintInfo *riinfo,
+											 Relation fk_rel);
+static void ri_FastPathTeardown(void *arg);
 
 
 /*
@@ -387,12 +411,16 @@ RI_FKey_check(TriggerData *trigdata)
 	 * index scan + tuple lock.  This is semantically equivalent to
 	 * the SPI path below but avoids the per-row executor overhead.
 	 *
-	 * ri_FastPathCheck() reports the violation itself (via ereport)
-	 * if no matching PK row is found, so it only returns on success.
+	 * ri_FastPathCheckCached() and ri_FastPathCheck() report the violation
+	 * themselves if no matching PK row is found, so they only return on
+	 * success.
 	 */
 	if (ri_fastpath_is_applicable(riinfo))
 	{
-		ri_FastPathCheck(riinfo, fk_rel, newslot);
+		if (AfterTriggerBatchIsActive())
+			ri_FastPathCheckCached(riinfo, fk_rel, newslot);
+		else
+			ri_FastPathCheck(riinfo, fk_rel, newslot);
 		return PointerGetDatum(NULL);
 	}
 
@@ -2742,6 +2770,73 @@ ri_FastPathCheck(const RI_ConstraintInfo *riinfo,
 	table_close(pk_rel, NoLock);
 }
 
+/*
+ * ri_FastPathCheckCached
+ *		Cached-resource variant of ri_FastPathCheck for use within the
+ *		after-trigger framework.
+ *
+ * Uses the per-batch cache (RI_FastPathEntry) to avoid per-row relation
+ * open/close, scan begin/end, and snapshot registration.  The snapshot's
+ * curcid is patched each call so the scan sees effects of prior triggers.
+ *
+ * Like ri_FastPathCheck, reports the violation via ri_ReportViolation()
+ * if no matching PK row is found.
+ */
+static void
+ri_FastPathCheckCached(const RI_ConstraintInfo *riinfo,
+					   Relation fk_rel, TupleTableSlot *newslot)
+{
+	RI_FastPathEntry *fpentry = ri_FastPathGetEntry(riinfo, fk_rel);
+	Relation	pk_rel = fpentry->pk_rel;
+	Relation	idx_rel = fpentry->idx_rel;
+	IndexScanDesc scandesc = fpentry->scandesc;
+	Snapshot	snapshot = fpentry->snapshot;
+	TupleTableSlot *slot = fpentry->slot;
+	Datum		pk_vals[INDEX_MAX_KEYS];
+	char		pk_nulls[INDEX_MAX_KEYS];
+	ScanKeyData skey[INDEX_MAX_KEYS];
+	bool		found;
+	Oid			saved_userid;
+	int			saved_sec_context;
+	MemoryContext oldcxt;
+
+	/*
+	 * Advance the command counter and patch the cached snapshot's curcid so
+	 * the scan sees PK rows inserted by earlier triggers in this statement.
+	 */
+	CommandCounterIncrement();
+	fpentry->snapshot->curcid = GetCurrentCommandId(false);
+
+	if (riinfo->fpmeta == NULL)
+		ri_populate_fastpath_metadata((RI_ConstraintInfo *) riinfo,
+									  fk_rel, idx_rel);
+	Assert(riinfo->fpmeta);
+
+	GetUserIdAndSecContext(&saved_userid, &saved_sec_context);
+	SetUserIdAndSecContext(RelationGetForm(pk_rel)->relowner,
+						   saved_sec_context |
+						   SECURITY_LOCAL_USERID_CHANGE |
+						   SECURITY_NOFORCE_RLS);
+
+	ri_ExtractValues(fk_rel, newslot, riinfo, false, pk_vals, pk_nulls);
+	build_index_scankeys(riinfo, idx_rel, pk_vals, pk_nulls, skey);
+
+	/*
+	 * The cached scandesc lives in TopTransactionContext, but the btree AM
+	 * defers some allocations to the first index_getnext_slot call.  Ensure
+	 * those land in TopTransactionContext too.
+	 */
+	oldcxt = MemoryContextSwitchTo(TopTransactionContext);
+	found = ri_FastPathProbeOne(pk_rel, idx_rel, scandesc, slot, snapshot,
+								riinfo, skey, riinfo->nkeys);
+	MemoryContextSwitchTo(oldcxt);
+	SetUserIdAndSecContext(saved_userid, saved_sec_context);
+
+	if (!found)
+		ri_ReportViolation(riinfo, pk_rel, fk_rel, newslot, NULL,
+						   RI_PLAN_CHECK_LOOKUPPK, false, false);
+}
+
 /*
  * ri_FastPathProbeOne
  *		Probe the PK index for one set of scan keys, lock the matching
@@ -3672,3 +3767,177 @@ RI_FKey_trigger_type(Oid tgfoid)
 
 	return RI_TRIGGER_NONE;
 }
+
+/*
+ * ri_FastPathTeardown
+ *		Tear down all cached fast-path state.
+ *
+ * Called as an AfterTriggerBatchCallback at end of batch.
+ */
+static void
+ri_FastPathTeardown(void *arg)
+{
+	HASH_SEQ_STATUS status;
+	RI_FastPathEntry *entry;
+
+	if (ri_fastpath_cache == NULL)
+		return;
+
+	hash_seq_init(&status, ri_fastpath_cache);
+	while ((entry = hash_seq_search(&status)) != NULL)
+	{
+		/* Close the scan before closing idx_rel. */
+		if (entry->scandesc)
+			index_endscan(entry->scandesc);
+		if (entry->idx_rel)
+			index_close(entry->idx_rel, NoLock);
+		if (entry->pk_rel)
+			table_close(entry->pk_rel, NoLock);
+		if (entry->slot)
+			ExecDropSingleTupleTableSlot(entry->slot);
+		if (entry->snapshot)
+			UnregisterSnapshot(entry->snapshot);
+	}
+
+	hash_destroy(ri_fastpath_cache);
+	ri_fastpath_cache = NULL;
+	ri_fastpath_callback_registered = false;
+}
+
+static bool ri_fastpath_xact_callback_registered = false;
+
+static void
+ri_FastPathXactCallback(XactEvent event, void *arg)
+{
+	/*
+	 * TopTransactionContext is destroyed at end of transaction, taking the
+	 * hash table and all cached resources with it.  Just reset our static
+	 * pointers so we don't dereference freed memory.
+	 *
+	 * In the normal (non-error) path, ri_FastPathTeardown already ran via the
+	 * batch callback and did orderly teardown.  Here we're just handling the
+	 * abort path where that callback never fired.
+	 */
+	ri_fastpath_cache = NULL;
+	ri_fastpath_callback_registered = false;
+}
+
+static void
+ri_FastPathSubXactCallback(SubXactEvent event, SubTransactionId mySubid,
+						   SubTransactionId parentSubid, void *arg)
+{
+	if (event == SUBXACT_EVENT_ABORT_SUB)
+	{
+		/*
+		 * ResourceOwner already cleaned up relations and snapshots.  Just
+		 * NULL our pointers so the still-registered batch callback becomes a
+		 * no-op.  The hash table memory in TopTransactionContext will be
+		 * freed at transaction end.
+		 */
+		ri_fastpath_cache = NULL;
+		ri_fastpath_callback_registered = false;
+	}
+}
+
+/*
+ * ri_FastPathGetEntry
+ *		Look up or create a per-batch cache entry for the given constraint.
+ *
+ * On first call for a constraint within a batch: opens pk_rel and the
+ * index, begins an index scan, allocates a result slot, and registers
+ * the cleanup callback.
+ *
+ * On subsequent calls: returns the existing entry.  Caller uses
+ * index_rescan() with new keys.
+ */
+static RI_FastPathEntry *
+ri_FastPathGetEntry(const RI_ConstraintInfo *riinfo, Relation fk_rel)
+{
+	RI_FastPathEntry *entry;
+	bool		found;
+
+	/* Create hash table on first use in this batch */
+	if (ri_fastpath_cache == NULL)
+	{
+		HASHCTL		ctl;
+
+		if (!ri_fastpath_xact_callback_registered)
+		{
+			RegisterXactCallback(ri_FastPathXactCallback, NULL);
+			RegisterSubXactCallback(ri_FastPathSubXactCallback, NULL);
+			ri_fastpath_xact_callback_registered = true;
+		}
+
+		ctl.keysize = sizeof(Oid);
+		ctl.entrysize = sizeof(RI_FastPathEntry);
+		ctl.hcxt = TopTransactionContext;
+		ri_fastpath_cache = hash_create("RI fast-path cache",
+										16,
+										&ctl,
+										HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+	}
+
+	entry = hash_search(ri_fastpath_cache, &riinfo->constraint_id,
+						HASH_ENTER, &found);
+
+	if (!found)
+	{
+		MemoryContext oldcxt;
+		Oid		saved_userid;
+		int		saved_sec_context;
+
+		/*
+		 * Zero out non-key fields so ri_FastPathTeardown is safe if we error
+		 * out during partial initialization below.
+		 */
+		memset(((char *) entry) + offsetof(RI_FastPathEntry, pk_rel), 0,
+			   sizeof(RI_FastPathEntry) - offsetof(RI_FastPathEntry, pk_rel));
+
+		oldcxt = MemoryContextSwitchTo(TopTransactionContext);
+
+		/*
+		 * Open PK table and its unique index.
+		 *
+		 * RowShareLock on pk_rel matches what the SPI path's SELECT ... FOR
+		 * KEY SHARE would acquire as a relation-level lock. AccessShareLock
+		 * on the index is standard for index scans.
+		 *
+		 * We don't release these locks until end of transaction, matching SPI
+		 * behavior.
+		 */
+		entry->pk_rel = table_open(riinfo->pk_relid, RowShareLock);
+		entry->idx_rel = index_open(riinfo->conindid, AccessShareLock);
+
+		/*
+		 * Register an initial snapshot.  Its curcid will be patched in place
+		 * on each subsequent row (see ri_FastPathCheckCached()), avoiding
+		 * per-row GetSnapshotData() overhead.
+		 */
+		entry->snapshot = RegisterSnapshot(GetTransactionSnapshot());
+
+		entry->slot = table_slot_create(entry->pk_rel, NULL);
+
+		entry->scandesc = index_beginscan(entry->pk_rel, entry->idx_rel,
+										  entry->snapshot, NULL,
+										  riinfo->nkeys, 0);
+
+		MemoryContextSwitchTo(oldcxt);
+
+		/* Ensure cleanup at end of this trigger-firing batch */
+		if (!ri_fastpath_callback_registered)
+		{
+			RegisterAfterTriggerBatchCallback(ri_FastPathTeardown, NULL);
+			ri_fastpath_callback_registered = true;
+		}
+
+		GetUserIdAndSecContext(&saved_userid, &saved_sec_context);
+		SetUserIdAndSecContext(RelationGetForm(entry->pk_rel)->relowner,
+							   saved_sec_context |
+							   SECURITY_LOCAL_USERID_CHANGE |
+							   SECURITY_NOFORCE_RLS);
+		ri_CheckPermissions(entry->pk_rel);
+		SetUserIdAndSecContext(saved_userid, saved_sec_context);
+	}
+
+	return entry;
+}
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 556c86bf5e1..4304abffc8d 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -289,4 +289,22 @@ extern void RI_PartitionRemove_Check(Trigger *trigger, Relation fk_rel,
 
 extern int	RI_FKey_trigger_type(Oid tgfoid);
 
+/*
+ * Callback type for end-of-trigger-batch notifications.
+ *
+ * Registered via RegisterAfterTriggerBatchCallback().  Invoked when
+ * a batch of after-trigger processing completes:
+ *	- AfterTriggerEndQuery()      (immediate constraints)
+ *	- AfterTriggerFireDeferred()  (deferred constraints at COMMIT)
+ *	- AfterTriggerSetState()      (SET CONSTRAINTS IMMEDIATE)
+ *
+ * The callback list is cleared after each batch.  Callers must
+ * re-register if they need to be called again in a subsequent batch.
+ */
+typedef void (*AfterTriggerBatchCallback) (void *arg);
+
+extern void RegisterAfterTriggerBatchCallback(AfterTriggerBatchCallback callback,
+											  void *arg);
+extern bool AfterTriggerBatchIsActive(void);
+
 #endif							/* TRIGGER_H */
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index 0826f518004..25d505c6c12 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -3504,3 +3504,89 @@ DETAIL:  drop cascades to table fkpart13_t1
 drop cascades to table fkpart13_t2
 drop cascades to table fkpart13_t3
 RESET search_path;
+-- Tests foreign key check fast-path no-cache path.
+CREATE TABLE fp_pk_alter (a int PRIMARY KEY);
+INSERT INTO fp_pk_alter SELECT generate_series(1, 100);
+CREATE TABLE fp_fk_alter (a int);
+INSERT INTO fp_fk_alter SELECT generate_series(1, 100);
+-- Validation path: should succeed
+ALTER TABLE fp_fk_alter ADD FOREIGN KEY (a) REFERENCES fp_pk_alter;
+INSERT INTO fp_fk_alter VALUES (101);  -- should fail (constraint active)
+ERROR:  insert or update on table "fp_fk_alter" violates foreign key constraint "fp_fk_alter_a_fkey"
+DETAIL:  Key (a)=(101) is not present in table "fp_pk_alter".
+DROP TABLE fp_fk_alter, fp_pk_alter;
+-- Separate test: validation catches existing violation
+CREATE TABLE fp_pk_alter2 (a int PRIMARY KEY);
+INSERT INTO fp_pk_alter2 VALUES (1);
+CREATE TABLE fp_fk_alter2 (a int);
+INSERT INTO fp_fk_alter2 VALUES (1), (200);  -- 200 has no PK match
+ALTER TABLE fp_fk_alter2 ADD FOREIGN KEY (a) REFERENCES fp_pk_alter2;  -- should fail
+ERROR:  insert or update on table "fp_fk_alter2" violates foreign key constraint "fp_fk_alter2_a_fkey"
+DETAIL:  Key (a)=(200) is not present in table "fp_pk_alter2".
+DROP TABLE fp_fk_alter2, fp_pk_alter2;
+-- Tests that the fast-path handles caching for multiple constraints
+CREATE TABLE fp_pk1 (a int PRIMARY KEY);
+CREATE TABLE fp_pk2 (b int PRIMARY KEY);
+INSERT INTO fp_pk1 VALUES (1);
+INSERT INTO fp_pk2 VALUES (1);
+CREATE TABLE fp_multi_fk (
+    a int REFERENCES fp_pk1,
+    b int REFERENCES fp_pk2
+);
+INSERT INTO fp_multi_fk VALUES (1, 1);  -- two constraints, one batch
+INSERT INTO fp_multi_fk VALUES (1, 2);  -- second constraint fails
+ERROR:  insert or update on table "fp_multi_fk" violates foreign key constraint "fp_multi_fk_b_fkey"
+DETAIL:  Key (b)=(2) is not present in table "fp_pk2".
+DROP TABLE fp_multi_fk, fp_pk1, fp_pk2;
+-- Test that fast-path cache handles deferred constraints and SET CONSTRAINTS IMMEDIATE
+CREATE TABLE fp_pk_defer (a int PRIMARY KEY);
+CREATE TABLE fp_fk_defer (a int REFERENCES fp_pk_defer DEFERRABLE INITIALLY DEFERRED);
+INSERT INTO fp_pk_defer VALUES (1), (2);
+BEGIN;
+INSERT INTO fp_fk_defer VALUES (1);
+INSERT INTO fp_fk_defer VALUES (2);
+SET CONSTRAINTS ALL IMMEDIATE;  -- fires batch callback here
+INSERT INTO fp_fk_defer VALUES (3);  -- should fail, also tests that cache was cleaned up
+ERROR:  insert or update on table "fp_fk_defer" violates foreign key constraint "fp_fk_defer_a_fkey"
+DETAIL:  Key (a)=(3) is not present in table "fp_pk_defer".
+COMMIT;
+DROP TABLE fp_pk_defer, fp_fk_defer;
+-- Subtransaction abort: cached state must be invalidated on ROLLBACK TO
+CREATE TABLE fp_pk_subxact (a int PRIMARY KEY);
+CREATE TABLE fp_fk_subxact (a int REFERENCES fp_pk_subxact);
+INSERT INTO fp_pk_subxact VALUES (1), (2);
+BEGIN;
+INSERT INTO fp_fk_subxact VALUES (1);
+SAVEPOINT sp1;
+INSERT INTO fp_fk_subxact VALUES (2);
+ROLLBACK TO sp1;
+INSERT INTO fp_fk_subxact VALUES (1);
+COMMIT;
+SELECT * FROM fp_fk_subxact;
+ a 
+---
+ 1
+ 1
+(2 rows)
+
+DROP TABLE fp_fk_subxact, fp_pk_subxact;
+-- FK check must see PK rows inserted by earlier AFTER triggers
+-- firing on the same statement
+CREATE TABLE fp_pk_cci (a int PRIMARY KEY);
+CREATE TABLE fp_fk_cci (a int REFERENCES fp_pk_cci);
+CREATE FUNCTION fp_auto_pk() RETURNS trigger AS $$
+BEGIN
+  RAISE NOTICE 'fp_auto_pk called';
+  INSERT INTO fp_pk_cci VALUES (NEW.a);
+  RETURN NEW;
+END $$ LANGUAGE plpgsql;
+-- Name sorts before the RI trigger, so fires first per row
+CREATE TRIGGER "AAA_auto" AFTER INSERT ON fp_fk_cci
+  FOR EACH ROW EXECUTE FUNCTION fp_auto_pk();
+-- Should succeed: AAA_auto provisions the PK row before RI check
+INSERT INTO fp_fk_cci VALUES (1), (2), (3);
+NOTICE:  fp_auto_pk called
+NOTICE:  fp_auto_pk called
+NOTICE:  fp_auto_pk called
+DROP TABLE fp_fk_cci, fp_pk_cci;
+DROP FUNCTION fp_auto_pk;
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index e9ee29331cb..cedd20c8d11 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -2498,3 +2498,83 @@ WITH cte AS (
 
 DROP SCHEMA fkpart13 CASCADE;
 RESET search_path;
+
+-- Tests foreign key check fast-path no-cache path.
+CREATE TABLE fp_pk_alter (a int PRIMARY KEY);
+INSERT INTO fp_pk_alter SELECT generate_series(1, 100);
+CREATE TABLE fp_fk_alter (a int);
+INSERT INTO fp_fk_alter SELECT generate_series(1, 100);
+-- Validation path: should succeed
+ALTER TABLE fp_fk_alter ADD FOREIGN KEY (a) REFERENCES fp_pk_alter;
+INSERT INTO fp_fk_alter VALUES (101);  -- should fail (constraint active)
+DROP TABLE fp_fk_alter, fp_pk_alter;
+
+-- Separate test: validation catches existing violation
+CREATE TABLE fp_pk_alter2 (a int PRIMARY KEY);
+INSERT INTO fp_pk_alter2 VALUES (1);
+CREATE TABLE fp_fk_alter2 (a int);
+INSERT INTO fp_fk_alter2 VALUES (1), (200);  -- 200 has no PK match
+ALTER TABLE fp_fk_alter2 ADD FOREIGN KEY (a) REFERENCES fp_pk_alter2;  -- should fail
+DROP TABLE fp_fk_alter2, fp_pk_alter2;
+
+-- Tests that the fast-path handles caching for multiple constraints
+CREATE TABLE fp_pk1 (a int PRIMARY KEY);
+CREATE TABLE fp_pk2 (b int PRIMARY KEY);
+INSERT INTO fp_pk1 VALUES (1);
+INSERT INTO fp_pk2 VALUES (1);
+CREATE TABLE fp_multi_fk (
+    a int REFERENCES fp_pk1,
+    b int REFERENCES fp_pk2
+);
+INSERT INTO fp_multi_fk VALUES (1, 1);  -- two constraints, one batch
+INSERT INTO fp_multi_fk VALUES (1, 2);  -- second constraint fails
+DROP TABLE fp_multi_fk, fp_pk1, fp_pk2;
+
+-- Test that fast-path cache handles deferred constraints and SET CONSTRAINTS IMMEDIATE
+CREATE TABLE fp_pk_defer (a int PRIMARY KEY);
+CREATE TABLE fp_fk_defer (a int REFERENCES fp_pk_defer DEFERRABLE INITIALLY DEFERRED);
+INSERT INTO fp_pk_defer VALUES (1), (2);
+
+BEGIN;
+INSERT INTO fp_fk_defer VALUES (1);
+INSERT INTO fp_fk_defer VALUES (2);
+SET CONSTRAINTS ALL IMMEDIATE;  -- fires batch callback here
+INSERT INTO fp_fk_defer VALUES (3);  -- should fail, also tests that cache was cleaned up
+COMMIT;
+DROP TABLE fp_pk_defer, fp_fk_defer;
+
+-- Subtransaction abort: cached state must be invalidated on ROLLBACK TO
+CREATE TABLE fp_pk_subxact (a int PRIMARY KEY);
+CREATE TABLE fp_fk_subxact (a int REFERENCES fp_pk_subxact);
+INSERT INTO fp_pk_subxact VALUES (1), (2);
+BEGIN;
+INSERT INTO fp_fk_subxact VALUES (1);
+SAVEPOINT sp1;
+INSERT INTO fp_fk_subxact VALUES (2);
+ROLLBACK TO sp1;
+INSERT INTO fp_fk_subxact VALUES (1);
+COMMIT;
+SELECT * FROM fp_fk_subxact;
+DROP TABLE fp_fk_subxact, fp_pk_subxact;
+
+-- FK check must see PK rows inserted by earlier AFTER triggers
+-- firing on the same statement
+CREATE TABLE fp_pk_cci (a int PRIMARY KEY);
+CREATE TABLE fp_fk_cci (a int REFERENCES fp_pk_cci);
+
+CREATE FUNCTION fp_auto_pk() RETURNS trigger AS $$
+BEGIN
+  RAISE NOTICE 'fp_auto_pk called';
+  INSERT INTO fp_pk_cci VALUES (NEW.a);
+  RETURN NEW;
+END $$ LANGUAGE plpgsql;
+
+-- Name sorts before the RI trigger, so fires first per row
+CREATE TRIGGER "AAA_auto" AFTER INSERT ON fp_fk_cci
+  FOR EACH ROW EXECUTE FUNCTION fp_auto_pk();
+
+-- Should succeed: AAA_auto provisions the PK row before RI check
+INSERT INTO fp_fk_cci VALUES (1), (2), (3);
+
+DROP TABLE fp_fk_cci, fp_pk_cci;
+DROP FUNCTION fp_auto_pk;
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 3da19d41413..9b90f70ecce 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -30,6 +30,8 @@ AddForeignUpdateTargets_function
 AddrInfo
 AffixNode
 AffixNodeData
+AfterTriggerBatchCallback
+AfterTriggerCallbackItem
 AfterTriggerEvent
 AfterTriggerEventChunk
 AfterTriggerEventData
@@ -2448,6 +2450,7 @@ RIX
 RI_CompareHashEntry
 RI_CompareKey
 RI_ConstraintInfo
+RI_FastPathEntry
 RI_QueryHashEntry
 RI_QueryKey
 RTEKind
-- 
2.47.3

