From e3da29f24c92a82a2d38929c1aa96fe72cc98a0b Mon Sep 17 00:00:00 2001
From: Amit Langote <amitlan@postgresql.org>
Date: Tue, 11 Nov 2025 22:30:52 +0900
Subject: [PATCH v5 4/6] Use pruning-aware locking in cached plans

Extend GetCachedPlan() to perform ExecutorPrep() on each planned
statement, capturing unpruned relids and initial pruning results.
Use this data to lock only the unprunable relations and the partitions
that survive initial pruning, avoiding unnecessary locking of pruned
partitions even when reusing a cached plan.
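
For example, ExecuteQuery() in this patch ties the prep state to the
portal that will run the plan:

    CachedPlanPrepData cprep = {0};

    /* Keep ExecutorPrep state with the portal and its resowner. */
    cprep.context = portal->portalContext;
    cprep.owner = portal->resowner;
    cplan = GetCachedPlan(entry->plansource, paramLI, NULL, NULL, &cprep);

Passing a NULL 'cprep' instead preserves the old behavior of locking
every relation in the plan.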

Introduce CachedPlanPrepData to carry the EStates created by
ExecutorPrep() through the plan caching layer. The prep_estates
list is indexed one-to-one with CachedPlan->stmt_list and is
populated when GetCachedPlan() prepares a reused generic plan.
Adjust call sites in SPI, functions, portals, and EXPLAIN to
propagate this data.
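
A minimal sketch of how a consumer pairs the two lists (utility
statements leave a NULL entry); actual consumers hand the list to
PortalDefineQuery() or walk it when calling ExecutorStart():

    ListCell   *prep_lc = list_head(cprep.prep_estates);

    foreach(lc, cplan->stmt_list)
    {
        PlannedStmt *pstmt = lfirst_node(PlannedStmt, lc);
        EState     *prep_estate = prep_lc ? (EState *) lfirst(prep_lc) : NULL;

        /*
         * prep_estate, when set, carries the ExecutorPrep() results for
         * pstmt and is reused at ExecutorStart() time.
         */
        if (prep_lc)
            prep_lc = lnext(cprep.prep_estates, prep_lc);
    }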

Partition pruning expressions may call PL functions that require
an active snapshot (e.g., via EnsurePortalSnapshotExists()).
AcquireExecutorLocksUnpruned() establishes one before calling
ExecutorPrep() if needed, ensuring these expressions can execute
correctly during plan cache validation.
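
The guard, as it appears in AcquireExecutorLocksUnpruned():

    bool        snap_pushed = false;

    if (!ActiveSnapshotSet())
    {
        PushActiveSnapshot(GetTransactionSnapshot());
        snap_pushed = true;
    }

    prep_estate = ExecutorPrep(plannedstmt, cprep->params, cprep->owner,
                               true, cprep->eflags);

    if (snap_pushed)
        PopActiveSnapshot();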

To maintain correctness when all target partitions are pruned, also
reinstate the firstResultRel locking behavior lost in commit
28317de72. That commit required the first ModifyTable target to
remain initialized for executor assumptions to hold. We now
explicitly track these relids in PlannerGlobal and PlannedStmt so they
are locked even if pruned, preserving that rule across cached plan
reuse.
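
On the locking side, AcquireExecutorLocksUnpruned() folds these RT
indexes back into the set of relations to lock, even when initial
pruning removed them:

    foreach(lc2, plannedstmt->firstResultRels)
    {
        Index       firstResultRel = lfirst_int(lc2);

        if (!bms_is_member(firstResultRel, lock_relids))
            lock_relids = bms_add_member(lock_relids, firstResultRel);
    }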

Add a regression test that causes a generic plan to become invalid
while pruning-aware setup is running. The pruning expression calls a
function that can perform DDL on a partition, making the plan stale
during reuse.

The test's purpose is to drive execution through the invalidation
path that discards any ExecutorPrep state created before the plan was
found invalid, providing coverage for that cleanup logic.
---
 src/backend/commands/prepare.c                |  19 +-
 src/backend/executor/functions.c              |   1 +
 src/backend/executor/nodeModifyTable.c        |   5 +-
 src/backend/executor/spi.c                    |  26 +-
 src/backend/optimizer/plan/planner.c          |   1 +
 src/backend/optimizer/plan/setrefs.c          |  20 ++
 src/backend/tcop/postgres.c                   |   9 +-
 src/backend/utils/cache/plancache.c           | 292 +++++++++++++++++-
 src/include/nodes/pathnodes.h                 |   3 +
 src/include/nodes/plannodes.h                 |  10 +
 src/include/utils/plancache.h                 |  27 +-
 src/test/regress/expected/partition_prune.out |  50 ++-
 src/test/regress/expected/plancache.out       |  62 ++++
 src/test/regress/sql/partition_prune.sql      |  24 +-
 src/test/regress/sql/plancache.sql            |  51 +++
 15 files changed, 574 insertions(+), 26 deletions(-)

diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 005fbb48aa5..e8cd47131ce 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -154,6 +154,7 @@ ExecuteQuery(ParseState *pstate,
 {
 	PreparedStatement *entry;
 	CachedPlan *cplan;
+	CachedPlanPrepData cprep = {0};
 	List	   *plan_list;
 	ParamListInfo paramLI = NULL;
 	EState	   *estate = NULL;
@@ -193,7 +194,10 @@ ExecuteQuery(ParseState *pstate,
 									   entry->plansource->query_string);
 
 	/* Replan if needed, and increment plan refcount for portal */
-	cplan = GetCachedPlan(entry->plansource, paramLI, NULL, NULL);
+	/* Keep ExecutorPrep state with the portal and its resowner. */
+	cprep.context = portal->portalContext;
+	cprep.owner = portal->resowner;
+	cplan = GetCachedPlan(entry->plansource, paramLI, NULL, NULL, &cprep);
 	plan_list = cplan->stmt_list;
 
 	/*
@@ -205,7 +209,7 @@ ExecuteQuery(ParseState *pstate,
 					  query_string,
 					  entry->plansource->commandTag,
 					  plan_list,
-					  NIL,
+					  cprep.prep_estates,
 					  cplan);
 
 	/*
@@ -575,6 +579,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 	PreparedStatement *entry;
 	const char *query_string;
 	CachedPlan *cplan;
+	CachedPlanPrepData cprep = {0};
 	List	   *plan_list;
 	List	   *prep_estates;
 	ListCell   *p;
@@ -633,8 +638,14 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 	}
 
 	/* Replan if needed, and acquire a transient refcount */
+	/* ExecutorPrep state is local to this EXPLAIN EXECUTE call. */
+	cprep.context = CurrentMemoryContext;
+	cprep.owner = CurrentResourceOwner;
+	if (es->generic)
+		cprep.eflags = EXEC_FLAG_EXPLAIN_GENERIC;
 	cplan = GetCachedPlan(entry->plansource, paramLI,
-						  CurrentResourceOwner, pstate->p_queryEnv);
+						  CurrentResourceOwner, pstate->p_queryEnv,
+						  &cprep);
 
 	INSTR_TIME_SET_CURRENT(planduration);
 	INSTR_TIME_SUBTRACT(planduration, planstart);
@@ -653,7 +664,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 	}
 
 	plan_list = cplan->stmt_list;
-	prep_estates = NIL;
+	prep_estates = cprep.prep_estates;
 
 	/* Explain each query */
 	prep_lc = list_head(prep_estates);
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index c93e2664cfd..65dfae58dcf 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -698,6 +698,7 @@ init_execution_state(SQLFunctionCachePtr fcache)
 	fcache->cplan = GetCachedPlan(plansource,
 								  fcache->paramLI,
 								  fcache->cowner,
+								  NULL,
 								  NULL);
 
 	/*
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index f5e9d369940..fc7ff46f86a 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -4664,8 +4664,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	 * as a reference for building the ResultRelInfo of the target partition.
 	 * In either case, it doesn't matter which result relation is kept, so we
 	 * just keep the first one, if all others have been pruned.  See also,
-	 * ExecDoInitialPruning(), which ensures that this first result relation
-	 * has been locked.
+	 * AcquireExecutorLocksUnpruned(), which ensures that this first result
+	 * relation has been locked.
 	 */
 	i = 0;
 	foreach(l, node->resultRelations)
@@ -4679,6 +4679,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 			/* all result relations pruned; keep the first one */
 			keep_rel = true;
 			rti = linitial_int(node->resultRelations);
+			Assert(list_member_int(estate->es_plannedstmt->firstResultRels, rti));
 			i = 0;
 		}
 
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 994a69a1c8e..13703969dd8 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1579,6 +1579,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 {
 	CachedPlanSource *plansource;
 	CachedPlan *cplan;
+	CachedPlanPrepData cprep = {0};
 	List	   *stmt_list;
 	char	   *query_string;
 	Snapshot	snapshot;
@@ -1659,7 +1660,11 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 	 */
 
 	/* Replan if needed, and increment plan refcount for portal */
-	cplan = GetCachedPlan(plansource, paramLI, NULL, _SPI_current->queryEnv);
+	/* ExecutorPrep state lives in this portal's context. */
+	cprep.context = portal->portalContext;
+	cprep.owner = portal->resowner;
+	cplan = GetCachedPlan(plansource, paramLI, NULL, _SPI_current->queryEnv,
+						  &cprep);
 	stmt_list = cplan->stmt_list;
 
 	if (!plan->saved)
@@ -1685,7 +1690,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 					  query_string,
 					  plansource->commandTag,
 					  stmt_list,
-					  NIL,
+					  cprep.prep_estates,	/* lives in portalContext */
 					  cplan);
 
 	/*
@@ -2078,6 +2083,7 @@ SPI_plan_get_cached_plan(SPIPlanPtr plan)
 {
 	CachedPlanSource *plansource;
 	CachedPlan *cplan;
+	CachedPlanPrepData cprep = {0};
 	SPICallbackArg spicallbackarg;
 	ErrorContextCallback spierrcontext;
 
@@ -2101,9 +2107,13 @@ SPI_plan_get_cached_plan(SPIPlanPtr plan)
 	error_context_stack = &spierrcontext;
 
 	/* Get the generic plan for the query */
+	/* ExecutorPrep() state lives in caller's active context. */
+	cprep.context = CurrentMemoryContext;
+	cprep.owner = CurrentResourceOwner;
 	cplan = GetCachedPlan(plansource, NULL,
 						  plan->saved ? CurrentResourceOwner : NULL,
-						  _SPI_current->queryEnv);
+						  _SPI_current->queryEnv,
+						  &cprep);
 	Assert(cplan == plansource->gplan);
 
 	/* Pop the error context stack */
@@ -2502,6 +2512,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
 		ListCell   *lc2;
 		List	   *prep_estates;
 		ListCell   *prep_lc;
+		CachedPlanPrepData cprep = {0};
 
 		spicallbackarg.query = plansource->query_string;
 
@@ -2576,11 +2587,16 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
 		 * Replan if needed, and increment plan refcount.  If it's a saved
 		 * plan, the refcount must be backed by the plan_owner.
 		 */
+
+		/* ExecutorPrep state is per _SPI_execute_plan call. */
+		cprep.context = CurrentMemoryContext;
+		cprep.owner = CurrentResourceOwner;
 		cplan = GetCachedPlan(plansource, options->params,
-							  plan_owner, _SPI_current->queryEnv);
+							  plan_owner, _SPI_current->queryEnv,
+							  &cprep);
 
 		stmt_list = cplan->stmt_list;
-		prep_estates = NIL;
+		prep_estates = cprep.prep_estates;
 
 		/*
 		 * If we weren't given a specific snapshot to use, and the statement
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 757bdc7b1de..10470297bdb 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -655,6 +655,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 											  glob->prunableRelids);
 	result->permInfos = glob->finalrteperminfos;
 	result->resultRelations = glob->resultRelations;
+	result->firstResultRels = glob->firstResultRels;
 	result->appendRelations = glob->appendRelations;
 	result->subplans = glob->subplans;
 	result->rewindPlanIDs = glob->rewindPlanIDs;
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 16d200cfb46..d20a66e3e37 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -381,6 +381,26 @@ set_plan_references(PlannerInfo *root, Plan *plan)
 		}
 	}
 
+	/*
+	 * Record the first result relation if it belongs to the set of
+	 * initially prunable relations.  We use bms_next_member() to get
+	 * the lowest-numbered leaf result rel, which matches
+	 * linitial_int(ModifyTable.resultRelations) because partition
+	 * expansion preserves RT index order.  There is one ModifyTable
+	 * per query level, so this captures exactly one entry per level.
+	 * ExecInitModifyTable() asserts that the recorded index matches
+	 * what it actually needs.
+	 */
+	if (root->leaf_result_relids)
+	{
+		Index	firstResultRel = bms_next_member(root->leaf_result_relids, -1);
+
+		firstResultRel += rtoffset;
+		if (bms_is_member(firstResultRel, root->glob->prunableRelids))
+			root->glob->firstResultRels =
+				lappend_int(root->glob->firstResultRels, firstResultRel);
+	}
+
 	return result;
 }
 
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 5541c574c8b..b749b9c8d1a 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -1636,6 +1636,7 @@ exec_bind_message(StringInfo input_message)
 	int16	   *rformats = NULL;
 	CachedPlanSource *psrc;
 	CachedPlan *cplan;
+	CachedPlanPrepData cprep = {0};
 	Portal		portal;
 	char	   *query_string;
 	char	   *saved_stmt_name;
@@ -2017,7 +2018,11 @@ exec_bind_message(StringInfo input_message)
 	 * will be generated in MessageContext.  The plan refcount will be
 	 * assigned to the Portal, so it will be released at portal destruction.
 	 */
-	cplan = GetCachedPlan(psrc, params, NULL, NULL);
+
+	/* ExecutorPrep() state lives in portal context. */
+	cprep.context = portal->portalContext;
+	cprep.owner = portal->resowner;
+	cplan = GetCachedPlan(psrc, params, NULL, NULL, &cprep);
 
 	/*
 	 * Now we can define the portal.
@@ -2030,7 +2035,7 @@ exec_bind_message(StringInfo input_message)
 					  query_string,
 					  psrc->commandTag,
 					  cplan->stmt_list,
-					  NIL,
+					  cprep.prep_estates,
 					  cplan);
 
 	/* Portal is defined, set the plan ID based on its contents. */
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 37d5d73b7fb..305fe912586 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -93,7 +93,7 @@ static bool StmtPlanRequiresRevalidation(CachedPlanSource *plansource);
 static bool BuildingPlanRequiresSnapshot(CachedPlanSource *plansource);
 static List *RevalidateCachedQuery(CachedPlanSource *plansource,
 								   QueryEnvironment *queryEnv);
-static bool CheckCachedPlan(CachedPlanSource *plansource);
+static bool PrepAndCheckCachedPlan(CachedPlanSource *plansource, CachedPlanPrepData *cprep);
 static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
 								   ParamListInfo boundParams, QueryEnvironment *queryEnv);
 static bool choose_custom_plan(CachedPlanSource *plansource,
@@ -101,6 +101,9 @@ static bool choose_custom_plan(CachedPlanSource *plansource,
 static double cached_plan_cost(CachedPlan *plan, bool include_planner);
 static Query *QueryListGetPrimaryStmt(List *stmts);
 static void AcquireExecutorLocks(List *stmt_list, bool acquire);
+static void AcquireExecutorLocksUnpruned(List *stmt_list, bool acquire,
+										 CachedPlanPrepData *cprep);
+static void CachedPlanPrepCleanup(CachedPlanPrepData *cprep);
 static void AcquirePlannerLocks(List *stmt_list, bool acquire);
 static void ScanQueryForLocks(Query *parsetree, bool acquire);
 static bool ScanQueryWalker(Node *node, bool *acquire);
@@ -137,6 +140,26 @@ ResourceOwnerForgetPlanCacheRef(ResourceOwner owner, CachedPlan *plan)
 /* GUC parameter */
 int			plan_cache_mode = PLAN_CACHE_MODE_AUTO;
 
+/*
+ * Lock acquisition policy for execution locks.
+ *
+ * LOCK_ALL acquires locks on all relations mentioned in the plan,
+ * reproducing the behavior of AcquireExecutorLocks().
+ *
+ * LOCK_UNPRUNED restricts locking to the unpruned relations: those listed
+ * in PlannedStmt.unprunableRelids plus the leaf partitions that remain
+ * after initial pruning.
+ */
+typedef enum LockPolicy
+{
+	LOCK_ALL,
+	LOCK_UNPRUNED,
+} LockPolicy;
+
+static void AcquireExecutorLocksWithPolicy(List *stmt_list,
+										   LockPolicy policy, bool acquire,
+										   CachedPlanPrepData *cprep);
+
 /*
  * InitPlanCache: initialize module during InitPostgres.
  *
@@ -938,7 +961,12 @@ RevalidateCachedQuery(CachedPlanSource *plansource,
 }
 
 /*
- * CheckCachedPlan: see if the CachedPlanSource's generic plan is valid.
+ * PrepAndCheckCachedPlan: see if the CachedPlanSource's generic plan is valid.
+ *
+ * If 'cprep' is not NULL, ExecutorPrep() is applied to each PlannedStmt to
+ * compute the set of partitions that survive initial runtime pruning, so
+ * that only those need to be locked.  The EStates created to do so are
+ * saved in cprep for later reuse by ExecutorStart().
  *
  * Caller must have already called RevalidateCachedQuery to verify that the
  * querytree is up to date.
@@ -947,7 +975,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource,
  * (We must do this for the "true" result to be race-condition-free.)
  */
 static bool
-CheckCachedPlan(CachedPlanSource *plansource)
+PrepAndCheckCachedPlan(CachedPlanSource *plansource, CachedPlanPrepData *cprep)
 {
 	CachedPlan *plan = plansource->gplan;
 
@@ -975,13 +1003,15 @@ CheckCachedPlan(CachedPlanSource *plansource)
 	 */
 	if (plan->is_valid)
 	{
+		LockPolicy policy = (cprep == NULL) ? LOCK_ALL : LOCK_UNPRUNED;
+
 		/*
 		 * Plan must have positive refcount because it is referenced by
 		 * plansource; so no need to fear it disappears under us here.
 		 */
 		Assert(plan->refcount > 0);
 
-		AcquireExecutorLocks(plan->stmt_list, true);
+		AcquireExecutorLocksWithPolicy(plan->stmt_list, policy, true, cprep);
 
 		/*
 		 * If plan was transient, check to see if TransactionXmin has
@@ -1003,7 +1033,10 @@ CheckCachedPlan(CachedPlanSource *plansource)
 		}
 
 		/* Oops, the race case happened.  Release useless locks. */
-		AcquireExecutorLocks(plan->stmt_list, false);
+		AcquireExecutorLocksWithPolicy(plan->stmt_list, policy, false, cprep);
+
+		/* Also clean up ExecutorPrep() state, if necessary. */
+		CachedPlanPrepCleanup(cprep);
 	}
 
 	/*
@@ -1283,6 +1316,11 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
  * On return, the plan is valid and we have sufficient locks to begin
  * execution.
  *
+ * If 'cprep' is not NULL and a generic plan is reused, the function prepares
+ * each PlannedStmt via ExecutorPrep() and stores the EStates in
+ * cprep->prep_estates.  These are intended to be passed later to
+ * ExecutorStart().
+ *
  * On return, the refcount of the plan has been incremented; a later
  * ReleaseCachedPlan() call is expected.  If "owner" is not NULL then
  * the refcount has been reported to that ResourceOwner (note that this
@@ -1293,7 +1331,8 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
  */
 CachedPlan *
 GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
-			  ResourceOwner owner, QueryEnvironment *queryEnv)
+			  ResourceOwner owner, QueryEnvironment *queryEnv,
+			  CachedPlanPrepData *cprep)
 {
 	CachedPlan *plan = NULL;
 	List	   *qlist;
@@ -1315,7 +1354,9 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
 
 	if (!customplan)
 	{
-		if (CheckCachedPlan(plansource))
+		if (cprep)
+			cprep->params = boundParams;
+		if (PrepAndCheckCachedPlan(plansource, cprep))
 		{
 			/* We want a generic plan, and we already have a valid one */
 			plan = plansource->gplan;
@@ -1901,6 +1942,38 @@ QueryListGetPrimaryStmt(List *stmts)
 	return NULL;
 }
 
+/*
+ * AcquireExecutorLocksWithPolicy
+ *		Acquire or release execution locks for a cached plan according to
+ *		the specified policy.
+ *
+ * LOCK_ALL reproduces AcquireExecutorLocks(), locking every relation in
+ * each PlannedStmt's rtable.  LOCK_UNPRUNED restricts locking to the
+ * unprunable rels and partitions that survive initial runtime pruning.
+ *
+ * When LOCK_UNPRUNED is used on acquire, ExecutorPrep() is invoked for
+ * each PlannedStmt and the resulting EStates are appended to
+ * cprep->prep_estates in cprep->context.  On release, the same EState
+ * list is consulted to determine which relations to unlock and each
+ * EState is released.
+ */
+static void
+AcquireExecutorLocksWithPolicy(List *stmt_list, LockPolicy policy, bool acquire,
+							   CachedPlanPrepData *cprep)
+{
+	switch (policy)
+	{
+		case LOCK_ALL:
+			AcquireExecutorLocks(stmt_list, acquire);
+			break;
+		case LOCK_UNPRUNED:
+			AcquireExecutorLocksUnpruned(stmt_list, acquire, cprep);
+			break;
+		default:
+			elog(ERROR, "invalid LockPolicy");
+	}
+}
+
 /*
  * AcquireExecutorLocks: acquire locks needed for execution of a cached plan;
  * or release them if acquire is false.
@@ -1953,6 +2026,211 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
 	}
 }
 
+/*
+ * LockRelids
+ *		Acquire or release locks on the specified relids, which reference
+ *		entries in the provided range table.
+ *
+ * Helper for AcquireExecutorLocksUnpruned().
+ */
+static void
+LockRelids(List *rtable, Bitmapset *relids, bool acquire)
+{
+	int	rtindex = -1;
+
+	while ((rtindex = bms_next_member(relids, rtindex)) >= 0)
+	{
+		RangeTblEntry *rte = list_nth_node(RangeTblEntry, rtable, rtindex - 1);
+
+		if (!(rte->rtekind == RTE_RELATION ||
+			  (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid))))
+			elog(ERROR, "cannot lock relation at RT index %d", rtindex);
+
+		/*
+		 * Acquire the appropriate type of lock on each relation OID. Note
+		 * that we don't actually try to open the rel, and hence will not
+		 * fail if it's been dropped entirely --- we'll just transiently
+		 * acquire a non-conflicting lock.
+		 */
+		if (acquire)
+			LockRelationOid(rte->relid, rte->rellockmode);
+		else
+			UnlockRelationOid(rte->relid, rte->rellockmode);
+	}
+}
+
+/*
+ * AcquireExecutorLocksUnpruned
+ *		Acquire or release execution locks for only unpruned relations
+ *		referenced by the given PlannedStmts.
+ *
+ * On acquire, this:
+ *	- locks unprunable rels listed in PlannedStmt.unprunableRelids
+ *	- runs ExecutorPrep() to perform initial runtime pruning
+ *	- locks the surviving partitions reported in the prep estate
+ *	- appends the EState pointer for each PlannedStmt to cprep->prep_estates
+ *
+ * On release, it:
+ *	- looks up the EState for each PlannedStmt from cprep->prep_estates
+ *	  (which must already be populated)
+ *	- unlocks the same relations identified during acquire
+ *	- cleans up each EState
+ *
+ * prep_estates is extended during acquire and must match stmt_list one-to-one
+ * when releasing locks.  Memory allocation for EState happens in
+ * cprep->context.  Locks are acquired using cprep->owner.
+ */
+static void
+AcquireExecutorLocksUnpruned(List *stmt_list, bool acquire,
+							 CachedPlanPrepData *cprep)
+{
+	MemoryContext oldcontext;
+	ListCell   *lc1;
+	List	   *prep_estates;
+	ListCell   *prep_lc;
+
+	Assert(cprep);
+	oldcontext = MemoryContextSwitchTo(cprep->context);
+
+	/*
+	 * When releasing locks, use the EState list (if any) created during
+	 * acquisition to determine which relids to unlock. The list must match
+	 * the PlannedStmt list one-to-one.
+	 */
+	prep_estates = cprep->prep_estates;
+	Assert(acquire || list_length(prep_estates) == list_length(stmt_list));
+
+	prep_lc = list_head(prep_estates);
+	foreach(lc1, stmt_list)
+	{
+		PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc1);
+		EState *prep_estate;
+
+		if (plannedstmt->commandType == CMD_UTILITY)
+		{
+			/* Same as AcquireExecutorLocks(). */
+			Query	   *query = UtilityContainsQuery(plannedstmt->utilityStmt);
+
+			if (query)
+				ScanQueryForLocks(query, acquire);
+
+			/* Keep the list one-to-one with stmt_list. */
+			if (acquire)
+				cprep->prep_estates = lappend(cprep->prep_estates, NULL);
+			else
+				(void) next_prep_estate(prep_estates, &prep_lc);
+			continue;
+		}
+
+		/*
+		 * Lock tables mentioned in the original query and other unprunable
+		 * relations that were added to the plan via inheritance expansion.
+		 */
+		LockRelids(plannedstmt->rtable, plannedstmt->unprunableRelids, acquire);
+
+		/* Lock partitions surviving runtime initial pruning. */
+		if (acquire)
+		{
+			/*
+			 * Pruning expressions may call PL functions that require an active
+			 * snapshot (e.g., via EnsurePortalSnapshotExists()). Establish one
+			 * if needed.
+			 */
+			bool		snap_pushed = false;
+
+			if (!ActiveSnapshotSet())
+			{
+				PushActiveSnapshot(GetTransactionSnapshot());
+				snap_pushed = true;
+			}
+
+			prep_estate = ExecutorPrep(plannedstmt, cprep->params, cprep->owner, true,
+									   cprep->eflags);
+			Assert(prep_estate);
+			cprep->prep_estates = lappend(cprep->prep_estates, prep_estate);
+
+			if (snap_pushed)
+				PopActiveSnapshot();
+		}
+		else
+			prep_estate = next_prep_estate(prep_estates, &prep_lc);
+
+		if (prep_estate)
+		{
+			/*
+			 * es_unpruned_relids includes plannedstmt->unprunableRelids,
+			 * which we've already locked.  Filter them out to avoid
+			 * double-locking.
+			 */
+			Bitmapset *lock_relids = bms_difference(prep_estate->es_unpruned_relids,
+													plannedstmt->unprunableRelids);
+
+			/*
+			 * We must always include the first result relation of each
+			 * ModifyTable node, that is, those listed in
+			 * plannedstmt->firstResultRels, in the set of relations to
+			 * be locked, to satisfy the executor assumptions described
+			 * in ExecInitModifyTable().  This can be wasteful: the first
+			 * result relation may go unused if other result relations
+			 * survive pruning and suffice for the ModifyTable node's
+			 * needs.  Unfortunately, we don't have a per-node set of
+			 * unpruned relids to tell whether that is the case.
+			 */
+			if (plannedstmt->resultRelations)
+			{
+				ListCell *lc2;
+
+				foreach(lc2, plannedstmt->firstResultRels)
+				{
+					Index       firstResultRel = lfirst_int(lc2);
+
+					if (!bms_is_member(firstResultRel, lock_relids))
+						lock_relids = bms_add_member(lock_relids, firstResultRel);
+				}
+			}
+
+			LockRelids(plannedstmt->rtable, lock_relids, acquire);
+			bms_free(lock_relids);
+		}
+	}
+
+	MemoryContextSwitchTo(oldcontext);
+}
+
+/*
+ * CachedPlanPrepCleanup
+ *		Clean up the EStates built for a generic plan.
+ *
+ * This is used in the corner case where PrepAndCheckCachedPlan() discovers
+ * that a CachedPlan has become invalid after AcquireExecutorLocksUnpruned()
+ * has already run.  In that case we must both release the execution locks
+ * and dispose of the EState list stored in CachedPlanPrepData, since the
+ * executor will never see or clean it up.
+ */
+static void
+CachedPlanPrepCleanup(CachedPlanPrepData *cprep)
+{
+	ListCell   *lc;
+
+	if (cprep == NULL)
+		return;
+
+	foreach(lc, cprep->prep_estates)
+	{
+		EState *prep_estate = (EState *) lfirst(lc);
+
+		if (prep_estate == NULL)
+			continue;
+
+		ExecCloseRangeTableRelations(prep_estate);
+		FreeExecutorState(prep_estate);
+	}
+
+	list_free(cprep->prep_estates);
+	cprep->prep_estates = NIL;
+}
+
 /*
  * AcquirePlannerLocks: acquire locks needed for planning of a querytree list;
  * or release them if acquire is false.
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index fb808823acf..653bd46ce05 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -214,6 +214,9 @@ typedef struct PlannerGlobal
 	/* "flat" list of integer RT indexes */
 	List	   *resultRelations;
 
+	/* "flat" list of integer RT indexes (one per ModifyTable node) */
+	List	   *firstResultRels;
+
 	/* "flat" list of AppendRelInfos */
 	List	   *appendRelations;
 
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 4bc6fb5670e..9e6106751cb 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -123,6 +123,16 @@ typedef struct PlannedStmt
 	/* integer list of RT indexes, or NIL */
 	List	   *resultRelations;
 
+	/*
+	 * rtable indexes of the first target relation of each ModifyTable node
+	 * in the plan, for INSERT/UPDATE/DELETE/MERGE.  NIL if resultRelations
+	 * is NIL.
+	 *
+	 * These are used by AcquireExecutorLocksUnpruned() to ensure that the
+	 * first result rel of each ModifyTable remains locked even if pruned;
+	 * see ExecInitModifyTable() for the executor-side assumptions.
+	 */
+	List	   *firstResultRels;
+
 	/* list of AppendRelInfo nodes */
 	List	   *appendRelations;
 
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index 984c51515c6..766a11d92a0 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -197,6 +197,30 @@ typedef struct CachedExpression
 } CachedExpression;
 
 
+/*
+ * CachedPlanPrepData
+ *      Carries ExecutorPrep results for each PlannedStmt in a CachedPlan,
+ *      along with context and owner information needed to allocate them.
+ *
+ * prep_estates is indexed one-to-one with CachedPlan->stmt_list, and is
+ * populated when GetCachedPlan() prepares a reused generic plan.  The
+ * same list is later used to determine which relations to unlock when
+ * releasing execution locks.
+ *
+ * ExecutorPrep state is allocated in 'context' and owned by 'owner'.
+ *
+ * eflags should be set appropriately when it can affect initial pruning,
+ * for example when running EXPLAIN (GENERIC_PLAN).
+ */
+typedef struct CachedPlanPrepData
+{
+	List   *prep_estates;	/* one EState per PlannedStmt; NULL for utility stmts */
+	ParamListInfo params;	/* params visible to ExecutorPrep */
+	MemoryContext context;	/* where to allocate EState and its fields */
+	ResourceOwner owner;	/* ResourceOwner for ExecutorPrep state */
+	int		eflags;			/* executor flags to control ExecutorPrep */
+} CachedPlanPrepData;
+
 extern void InitPlanCache(void);
 extern void ResetPlanCache(void);
 
@@ -240,7 +264,8 @@ extern List *CachedPlanGetTargetList(CachedPlanSource *plansource,
 extern CachedPlan *GetCachedPlan(CachedPlanSource *plansource,
 								 ParamListInfo boundParams,
 								 ResourceOwner owner,
-								 QueryEnvironment *queryEnv);
+								 QueryEnvironment *queryEnv,
+								 CachedPlanPrepData *cprep);
 extern void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner);
 
 extern bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource,
diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out
index 39dab8fcc05..39770f3b6d6 100644
--- a/src/test/regress/expected/partition_prune.out
+++ b/src/test/regress/expected/partition_prune.out
@@ -4860,9 +4860,7 @@ select c.relname
    relname    
 --------------
  prunelock_p1
- prunelock_p2
- prunelock_p3
-(3 rows)
+(1 row)
 
 commit;
 deallocate prunelock_q;
@@ -4904,6 +4902,50 @@ select c.relname
 
 commit;
 deallocate prunelock_q;
+reset enable_partition_pruning;
+--
+-- Verify firstResultRels handling with multiple ModifyTable nodes
+-- (writable CTEs) targeting a partitioned table.  When a pruning
+-- parameter matches no partition, all result relations are pruned
+-- and the executor must still find a usable first result relation
+-- for each ModifyTable node.
+--
+prepare prunelock_mt_q (int, int) as
+  with upd1 as (update prunelock_p set a = a),
+       upd2 as (update prunelock_p set a = a where a = $2)
+  update prunelock_p set a = a where a = $1;
+-- Force generic plan creation
+explain (costs off) execute prunelock_mt_q(1, 2);
+                         QUERY PLAN                         
+------------------------------------------------------------
+ Update on prunelock_p
+   Update on prunelock_p1 prunelock_p_1
+   CTE upd1
+     ->  Update on prunelock_p prunelock_p_3
+           Update on prunelock_p1 prunelock_p_4
+           Update on prunelock_p2 prunelock_p_5
+           Update on prunelock_p3 prunelock_p_6
+           ->  Append
+                 ->  Seq Scan on prunelock_p1 prunelock_p_4
+                 ->  Seq Scan on prunelock_p2 prunelock_p_5
+                 ->  Seq Scan on prunelock_p3 prunelock_p_6
+   CTE upd2
+     ->  Update on prunelock_p prunelock_p_7
+           Update on prunelock_p2 prunelock_p_8
+           ->  Append
+                 Subplans Removed: 2
+                 ->  Seq Scan on prunelock_p2 prunelock_p_8
+                       Filter: (a = $2)
+   ->  Append
+         Subplans Removed: 2
+         ->  Seq Scan on prunelock_p1 prunelock_p_1
+               Filter: (a = $1)
+(22 rows)
+
+-- All partitions pruned: values 4 and 5 match no partition, so each
+-- pruning-capable ModifyTable must still initialize correctly with
+-- no surviving result relations.
+execute prunelock_mt_q(4, 5);
+deallocate prunelock_mt_q;
 drop table prunelock_p;
 reset plan_cache_mode;
-reset enable_partition_pruning;
diff --git a/src/test/regress/expected/plancache.out b/src/test/regress/expected/plancache.out
index 4e59188196c..1d69ab0a1c2 100644
--- a/src/test/regress/expected/plancache.out
+++ b/src/test/regress/expected/plancache.out
@@ -398,3 +398,65 @@ select name, generic_plans, custom_plans from pg_prepared_statements
 (1 row)
 
 drop table test_mode;
+-- Test invalidation of a generic plan during pruning-aware lock setup.
+-- The pruning expression uses a stable SQL function that calls a volatile
+-- plpgsql function.  That function performs DDL on a partition when a
+-- separate "signal" table says to do so.  The second EXECUTE should
+-- replan cleanly after the DDL.
+set plan_cache_mode to force_generic_plan;
+create table inval_during_pruning_p (a int) partition by list (a);
+create table inval_during_pruning_p1 partition of inval_during_pruning_p for values in (1);
+create table inval_during_pruning_p2 partition of inval_during_pruning_p for values in (2);
+insert into inval_during_pruning_p values (1), (2);
+create table inval_during_pruning_signal (create_idx bool not null);
+insert into inval_during_pruning_signal values (false);
+create or replace function invalidate_plancache_func() returns int
+as $$
+declare
+	create_index bool;
+begin
+	-- Perform DDL on a partition if asked to
+	select create_idx into create_index from inval_during_pruning_signal for update;
+	if create_index = true then
+		raise notice 'creating index on partition inval_during_pruning_p1';
+		create index on inval_during_pruning_p1 (a);
+		update inval_during_pruning_signal set create_idx = false;
+	end if;
+	-- pruning parameter
+	return 1;
+end;
+$$ language plpgsql volatile;
+create or replace function stable_pruning_val() returns int as $$
+	select invalidate_plancache_func();
+$$ language sql stable;
+prepare inval_during_pruning_q as select * from inval_during_pruning_p where a = stable_pruning_val();
+-- Build a generic plan and run pruning once, but don't set the signal
+-- for invalidate_plancache_func() to perform the DDL.
+explain (verbose, costs off) execute inval_during_pruning_q;
+                                QUERY PLAN                                 
+---------------------------------------------------------------------------
+ Append
+   Subplans Removed: 1
+   ->  Seq Scan on public.inval_during_pruning_p1 inval_during_pruning_p_1
+         Output: inval_during_pruning_p_1.a
+         Filter: (inval_during_pruning_p_1.a = stable_pruning_val())
+(5 rows)
+
+-- Reuse the generic plan.  Make invalidate_plancache_func() perform DDL
+-- during this execution, which should force replanning without errors.
+update inval_during_pruning_signal set create_idx = true;
+explain (verbose, costs off) execute inval_during_pruning_q;
+NOTICE:  creating index on partition inval_during_pruning_p1
+                                QUERY PLAN                                 
+---------------------------------------------------------------------------
+ Append
+   Subplans Removed: 1
+   ->  Seq Scan on public.inval_during_pruning_p1 inval_during_pruning_p_1
+         Output: inval_during_pruning_p_1.a
+         Filter: (inval_during_pruning_p_1.a = stable_pruning_val())
+(5 rows)
+
+drop table inval_during_pruning_p, inval_during_pruning_signal;
+drop function invalidate_plancache_func, stable_pruning_val;
+deallocate inval_during_pruning_q;
+reset plan_cache_mode;
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index 229c5eb370c..87672ad40f7 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -1499,6 +1499,28 @@ select c.relname
 commit;
 
 deallocate prunelock_q;
+reset enable_partition_pruning;
+
+--
+-- Verify firstResultRels handling with multiple ModifyTable nodes
+-- (writable CTEs) targeting a partitioned table.  When a pruning
+-- parameter matches no partition, all result relations are pruned
+-- and the executor must still find a usable first result relation
+-- for each ModifyTable node.
+--
+prepare prunelock_mt_q (int, int) as
+  with upd1 as (update prunelock_p set a = a),
+       upd2 as (update prunelock_p set a = a where a = $2)
+  update prunelock_p set a = a where a = $1;
+
+-- Force generic plan creation
+explain (costs off) execute prunelock_mt_q(1, 2);
+
+-- All partitions pruned: values 4 and 5 match no partition, so each
+-- pruning-capable ModifyTable must still initialize correctly with
+-- no surviving result relations.
+execute prunelock_mt_q(4, 5);
+
+deallocate prunelock_mt_q;
 drop table prunelock_p;
 reset plan_cache_mode;
-reset enable_partition_pruning;
diff --git a/src/test/regress/sql/plancache.sql b/src/test/regress/sql/plancache.sql
index 4b2f11dcc64..139b4688fd6 100644
--- a/src/test/regress/sql/plancache.sql
+++ b/src/test/regress/sql/plancache.sql
@@ -223,3 +223,54 @@ select name, generic_plans, custom_plans from pg_prepared_statements
   where  name = 'test_mode_pp';
 
 drop table test_mode;
+
+-- Test invalidation of a generic plan during pruning-aware lock setup.
+-- The pruning expression uses a stable SQL function that calls a volatile
+-- plpgsql function.  That function performs DDL on a partition when a
+-- separate "signal" table says to do so.  The second EXECUTE should
+-- replan cleanly after the DDL.
+set plan_cache_mode to force_generic_plan;
+create table inval_during_pruning_p (a int) partition by list (a);
+create table inval_during_pruning_p1 partition of inval_during_pruning_p for values in (1);
+create table inval_during_pruning_p2 partition of inval_during_pruning_p for values in (2);
+insert into inval_during_pruning_p values (1), (2);
+
+create table inval_during_pruning_signal (create_idx bool not null);
+insert into inval_during_pruning_signal values (false);
+create or replace function invalidate_plancache_func() returns int
+as $$
+declare
+	create_index bool;
+begin
+	-- Perform DDL on a partition if asked to
+	select create_idx into create_index from inval_during_pruning_signal for update;
+	if create_index = true then
+		raise notice 'creating index on partition inval_during_pruning_p1';
+		create index on inval_during_pruning_p1 (a);
+		update inval_during_pruning_signal set create_idx = false;
+	end if;
+	-- pruning parameter
+	return 1;
+end;
+$$ language plpgsql volatile;
+
+create or replace function stable_pruning_val() returns int as $$
+	select invalidate_plancache_func();
+$$ language sql stable;
+
+prepare inval_during_pruning_q as select * from inval_during_pruning_p where a = stable_pruning_val();
+
+-- Build a generic plan and run pruning once, but don't set the signal
+-- for invalidate_plancache_func() to perform the DDL.
+explain (verbose, costs off) execute inval_during_pruning_q;
+
+-- Reuse the generic plan.  Make invalidate_plancache_func() perform DDL
+-- during this execution, which should force replanning without errors.
+update inval_during_pruning_signal set create_idx = true;
+explain (verbose, costs off) execute inval_during_pruning_q;
+
+drop table inval_during_pruning_p, inval_during_pruning_signal;
+drop function invalidate_plancache_func, stable_pruning_val;
+deallocate inval_during_pruning_q;
+
+reset plan_cache_mode;
-- 
2.47.3

