From 4a8e2b6c4d87e0ae81becd74b4a1e4d5217eb05a Mon Sep 17 00:00:00 2001
From: amitlan <amitlangote09@gmail.com>
Date: Fri, 20 Jan 2023 16:52:31 +0900
Subject: [PATCH v33] Move AcquireExecutorLocks()'s responsibility into the
 executor

---
 contrib/postgres_fdw/postgres_fdw.c           |   4 +
 src/backend/commands/copyto.c                 |   4 +-
 src/backend/commands/createas.c               |   2 +-
 src/backend/commands/explain.c                | 142 ++++++---
 src/backend/commands/extension.c              |   1 +
 src/backend/commands/matview.c                |   2 +-
 src/backend/commands/portalcmds.c             |  16 +-
 src/backend/commands/prepare.c                |  30 +-
 src/backend/executor/execMain.c               | 105 ++++++-
 src/backend/executor/execParallel.c           |   7 +-
 src/backend/executor/execPartition.c          |   4 +
 src/backend/executor/execProcnode.c           |   5 +
 src/backend/executor/execUtils.c              |   5 +-
 src/backend/executor/functions.c              |   1 +
 src/backend/executor/nodeAgg.c                |   2 +
 src/backend/executor/nodeAppend.c             |   4 +
 src/backend/executor/nodeBitmapAnd.c          |   2 +
 src/backend/executor/nodeBitmapHeapscan.c     |   4 +
 src/backend/executor/nodeBitmapOr.c           |   2 +
 src/backend/executor/nodeCustom.c             |   2 +
 src/backend/executor/nodeForeignscan.c        |   4 +
 src/backend/executor/nodeGather.c             |   2 +
 src/backend/executor/nodeGatherMerge.c        |   2 +
 src/backend/executor/nodeGroup.c              |   2 +
 src/backend/executor/nodeHash.c               |   2 +
 src/backend/executor/nodeHashjoin.c           |   4 +
 src/backend/executor/nodeIncrementalSort.c    |   2 +
 src/backend/executor/nodeIndexonlyscan.c      |   2 +
 src/backend/executor/nodeIndexscan.c          |   2 +
 src/backend/executor/nodeLimit.c              |   2 +
 src/backend/executor/nodeLockRows.c           |   2 +
 src/backend/executor/nodeMaterial.c           |   2 +
 src/backend/executor/nodeMemoize.c            |   2 +
 src/backend/executor/nodeMergeAppend.c        |   4 +
 src/backend/executor/nodeMergejoin.c          |   4 +
 src/backend/executor/nodeModifyTable.c        |   7 +
 src/backend/executor/nodeNestloop.c           |   4 +
 src/backend/executor/nodeProjectSet.c         |   2 +
 src/backend/executor/nodeRecursiveunion.c     |   4 +
 src/backend/executor/nodeResult.c             |   2 +
 src/backend/executor/nodeSamplescan.c         |   2 +
 src/backend/executor/nodeSeqscan.c            |   2 +
 src/backend/executor/nodeSetOp.c              |   2 +
 src/backend/executor/nodeSort.c               |   2 +
 src/backend/executor/nodeSubqueryscan.c       |   2 +
 src/backend/executor/nodeTidrangescan.c       |   2 +
 src/backend/executor/nodeTidscan.c            |   2 +
 src/backend/executor/nodeUnique.c             |   2 +
 src/backend/executor/nodeWindowAgg.c          |   2 +
 src/backend/executor/spi.c                    |  44 ++-
 src/backend/nodes/Makefile                    |   1 +
 src/backend/nodes/gen_node_support.pl         |   2 +
 src/backend/optimizer/plan/planner.c          |   1 +
 src/backend/optimizer/plan/setrefs.c          |   5 +
 src/backend/rewrite/rewriteHandler.c          |   7 +-
 src/backend/storage/lmgr/lmgr.c               |  45 +++
 src/backend/tcop/postgres.c                   |   8 +
 src/backend/tcop/pquery.c                     | 291 +++++++++---------
 src/backend/utils/cache/lsyscache.c           |  21 ++
 src/backend/utils/cache/plancache.c           | 134 +++-----
 src/backend/utils/mmgr/portalmem.c            |   6 +
 src/include/commands/explain.h                |   7 +-
 src/include/executor/execdesc.h               |   5 +
 src/include/executor/executor.h               |  12 +
 src/include/nodes/execnodes.h                 |   2 +
 src/include/nodes/meson.build                 |   1 +
 src/include/nodes/pathnodes.h                 |   3 +
 src/include/nodes/plannodes.h                 |   3 +
 src/include/storage/lmgr.h                    |   1 +
 src/include/utils/lsyscache.h                 |   1 +
 src/include/utils/plancache.h                 |  15 +
 src/include/utils/portal.h                    |   4 +
 src/test/modules/delay_execution/Makefile     |   3 +-
 .../modules/delay_execution/delay_execution.c |  63 +++-
 .../expected/cached-plan-replan.out           |  43 +++
 .../specs/cached-plan-replan.spec             |  39 +++
 76 files changed, 846 insertions(+), 340 deletions(-)
 create mode 100644 src/test/modules/delay_execution/expected/cached-plan-replan.out
 create mode 100644 src/test/modules/delay_execution/specs/cached-plan-replan.spec

diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index f5926ab89d..93f3f8b5d1 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -2659,7 +2659,11 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
 	/* Get info about foreign table. */
 	rtindex = node->resultRelInfo->ri_RangeTableIndex;
 	if (fsplan->scan.scanrelid == 0)
+	{
 		dmstate->rel = ExecOpenScanRelation(estate, rtindex, eflags);
+		if (!ExecPlanStillValid(estate))
+			return;
+	}
 	else
 		dmstate->rel = node->ss.ss_currentRelation;
 	table = GetForeignTable(RelationGetRelid(dmstate->rel));
diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c
index 8043b4e9b1..a438c547e8 100644
--- a/src/backend/commands/copyto.c
+++ b/src/backend/commands/copyto.c
@@ -558,7 +558,8 @@ BeginCopyTo(ParseState *pstate,
 		((DR_copy *) dest)->cstate = cstate;
 
 		/* Create a QueryDesc requesting no output */
-		cstate->queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext,
+		cstate->queryDesc = CreateQueryDesc(plan, NULL,
+											pstate->p_sourcetext,
 											GetActiveSnapshot(),
 											InvalidSnapshot,
 											dest, NULL, NULL, 0);
@@ -569,6 +570,7 @@ BeginCopyTo(ParseState *pstate,
 		 * ExecutorStart computes a result tupdesc for us
 		 */
 		ExecutorStart(cstate->queryDesc, 0);
+		Assert(cstate->queryDesc->plan_valid);
 
 		tupDesc = cstate->queryDesc->tupDesc;
 	}
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index d6c6d514f3..a55b851574 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -325,7 +325,7 @@ ExecCreateTableAs(ParseState *pstate, CreateTableAsStmt *stmt,
 		UpdateActiveSnapshotCommandId();
 
 		/* Create a QueryDesc, redirecting output to our tuple receiver */
-		queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext,
+		queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext,
 									GetActiveSnapshot(), InvalidSnapshot,
 									dest, params, queryEnv, 0);
 
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index fbbf28cf06..8fdc966a73 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -384,6 +384,7 @@ ExplainOneQuery(Query *query, int cursorOptions,
 	else
 	{
 		PlannedStmt *plan;
+		QueryDesc   *queryDesc;
 		instr_time	planstart,
 					planduration;
 		BufferUsage bufusage_start,
@@ -406,12 +407,89 @@ ExplainOneQuery(Query *query, int cursorOptions,
 			BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
 		}
 
+		queryDesc = ExplainQueryDesc(plan, NULL, queryString, into, es,
+									 params, queryEnv);
+		Assert(queryDesc);
+
 		/* run it (if needed) and produce output */
-		ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
+		ExplainOnePlan(queryDesc, into, es, queryString, params, queryEnv,
 					   &planduration, (es->buffers ? &bufusage : NULL));
 	}
 }
 
+/*
+ * ExplainQueryDesc
+ *		Set up QueryDesc for EXPLAINing a given plan
+ *
+ * This returns NULL if cplan is found to have been invalidated since its
+ * creation.
+ */
+QueryDesc *
+ExplainQueryDesc(PlannedStmt *stmt, CachedPlan *cplan,
+				 const char *queryString, IntoClause *into, ExplainState *es,
+				 ParamListInfo params, QueryEnvironment *queryEnv)
+{
+	QueryDesc  *queryDesc;
+	DestReceiver *dest;
+	int			eflags;
+	int			instrument_option = 0;
+
+	/*
+	 * Normally we discard the query's output, but if explaining CREATE TABLE
+	 * AS, we'd better use the appropriate tuple receiver.
+	 */
+	if (into)
+		dest = CreateIntoRelDestReceiver(into);
+	else
+		dest = None_Receiver;
+
+	if (es->analyze && es->timing)
+		instrument_option |= INSTRUMENT_TIMER;
+	else if (es->analyze)
+		instrument_option |= INSTRUMENT_ROWS;
+
+	if (es->buffers)
+		instrument_option |= INSTRUMENT_BUFFERS;
+	if (es->wal)
+		instrument_option |= INSTRUMENT_WAL;
+
+	/*
+	 * Use a snapshot with an updated command ID to ensure this query sees
+	 * results of any previously executed queries.
+	 */
+	PushCopiedSnapshot(GetActiveSnapshot());
+	UpdateActiveSnapshotCommandId();
+
+	/* Create a QueryDesc for the query */
+	queryDesc = CreateQueryDesc(stmt, cplan, queryString,
+								GetActiveSnapshot(), InvalidSnapshot,
+								dest, params, queryEnv, instrument_option);
+
+	/* Select execution options */
+	if (es->analyze)
+		eflags = 0;				/* default run-to-completion flags */
+	else
+		eflags = EXEC_FLAG_EXPLAIN_ONLY;
+	if (into)
+		eflags |= GetIntoRelEFlags(into);
+
+	/*
+	 * Call ExecutorStart to prepare the plan for execution.  A cached plan
+	 * may get invalidated as we're doing that.
+	 */
+	ExecutorStart(queryDesc, eflags);
+	if (!queryDesc->plan_valid)
+	{
+		/* Clean up. */
+		ExecutorEnd(queryDesc);
+		FreeQueryDesc(queryDesc);
+		PopActiveSnapshot();
+		return NULL;
+	}
+
+	return queryDesc;
+}
+
 /*
  * ExplainOneUtility -
  *	  print out the execution plan for one utility statement
@@ -515,29 +593,16 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
  * to call it.
  */
 void
-ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
+ExplainOnePlan(QueryDesc *queryDesc,
+			   IntoClause *into, ExplainState *es,
 			   const char *queryString, ParamListInfo params,
 			   QueryEnvironment *queryEnv, const instr_time *planduration,
 			   const BufferUsage *bufusage)
 {
-	DestReceiver *dest;
-	QueryDesc  *queryDesc;
 	instr_time	starttime;
 	double		totaltime = 0;
-	int			eflags;
-	int			instrument_option = 0;
-
-	Assert(plannedstmt->commandType != CMD_UTILITY);
 
-	if (es->analyze && es->timing)
-		instrument_option |= INSTRUMENT_TIMER;
-	else if (es->analyze)
-		instrument_option |= INSTRUMENT_ROWS;
-
-	if (es->buffers)
-		instrument_option |= INSTRUMENT_BUFFERS;
-	if (es->wal)
-		instrument_option |= INSTRUMENT_WAL;
+	Assert(queryDesc->plannedstmt->commandType != CMD_UTILITY);
 
 	/*
 	 * We always collect timing for the entire statement, even when node-level
@@ -546,38 +611,6 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 	 */
 	INSTR_TIME_SET_CURRENT(starttime);
 
-	/*
-	 * Use a snapshot with an updated command ID to ensure this query sees
-	 * results of any previously executed queries.
-	 */
-	PushCopiedSnapshot(GetActiveSnapshot());
-	UpdateActiveSnapshotCommandId();
-
-	/*
-	 * Normally we discard the query's output, but if explaining CREATE TABLE
-	 * AS, we'd better use the appropriate tuple receiver.
-	 */
-	if (into)
-		dest = CreateIntoRelDestReceiver(into);
-	else
-		dest = None_Receiver;
-
-	/* Create a QueryDesc for the query */
-	queryDesc = CreateQueryDesc(plannedstmt, queryString,
-								GetActiveSnapshot(), InvalidSnapshot,
-								dest, params, queryEnv, instrument_option);
-
-	/* Select execution options */
-	if (es->analyze)
-		eflags = 0;				/* default run-to-completion flags */
-	else
-		eflags = EXEC_FLAG_EXPLAIN_ONLY;
-	if (into)
-		eflags |= GetIntoRelEFlags(into);
-
-	/* call ExecutorStart to prepare the plan for execution */
-	ExecutorStart(queryDesc, eflags);
-
 	/* Execute the plan for statistics if asked for */
 	if (es->analyze)
 	{
@@ -4851,6 +4884,17 @@ ExplainDummyGroup(const char *objtype, const char *labelname, ExplainState *es)
 	}
 }
 
+/*
+ * Discard output buffer for a fresh restart.
+ */
+void
+ExplainResetOutput(ExplainState *es)
+{
+	Assert(es->str);
+	resetStringInfo(es->str);
+	ExplainBeginOutput(es);
+}
+
 /*
  * Emit the start-of-output boilerplate.
  *
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index b1509cc505..e2f79cc7a7 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -780,6 +780,7 @@ execute_sql_string(const char *sql)
 				QueryDesc  *qdesc;
 
 				qdesc = CreateQueryDesc(stmt,
+										NULL,
 										sql,
 										GetActiveSnapshot(), NULL,
 										dest, NULL, NULL, 0);
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index fb30d2595c..17d457ccfb 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -409,7 +409,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query,
 	UpdateActiveSnapshotCommandId();
 
 	/* Create a QueryDesc, redirecting output to our tuple receiver */
-	queryDesc = CreateQueryDesc(plan, queryString,
+	queryDesc = CreateQueryDesc(plan, NULL, queryString,
 								GetActiveSnapshot(), InvalidSnapshot,
 								dest, NULL, NULL, 0);
 
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 8a3cf98cce..3c34ab4351 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -146,6 +146,7 @@ PerformCursorOpen(ParseState *pstate, DeclareCursorStmt *cstmt, ParamListInfo pa
 	PortalStart(portal, params, 0, GetActiveSnapshot());
 
 	Assert(portal->strategy == PORTAL_ONE_SELECT);
+	Assert(portal->plan_valid);
 
 	/*
 	 * We're done; the query won't actually be run until PerformPortalFetch is
@@ -249,6 +250,17 @@ PerformPortalClose(const char *name)
 	PortalDrop(portal, false);
 }
 
+/*
+ * Release a portal's QueryDesc.
+ */
+void
+PortalQueryFinish(QueryDesc *queryDesc)
+{
+	ExecutorFinish(queryDesc);
+	ExecutorEnd(queryDesc);
+	FreeQueryDesc(queryDesc);
+}
+
 /*
  * PortalCleanup
  *
@@ -295,9 +307,7 @@ PortalCleanup(Portal portal)
 			if (portal->resowner)
 				CurrentResourceOwner = portal->resowner;
 
-			ExecutorFinish(queryDesc);
-			ExecutorEnd(queryDesc);
-			FreeQueryDesc(queryDesc);
+			PortalQueryFinish(queryDesc);
 
 			CurrentResourceOwner = saveResourceOwner;
 		}
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 18f70319fc..3099536a54 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -183,6 +183,7 @@ ExecuteQuery(ParseState *pstate,
 		paramLI = EvaluateParams(pstate, entry, stmt->params, estate);
 	}
 
+replan:
 	/* Create a new portal to run the query in */
 	portal = CreateNewPortal();
 	/* Don't display the portal in pg_cursors, it is for internal use only */
@@ -251,10 +252,17 @@ ExecuteQuery(ParseState *pstate,
 	}
 
 	/*
-	 * Run the portal as appropriate.
+	 * Run the portal as appropriate.  If the portal contains a cached plan,
+	 * it must be recreated if PortalStart() found the plan to be invalid.
 	 */
 	PortalStart(portal, paramLI, eflags, GetActiveSnapshot());
 
+	if (!portal->plan_valid)
+	{
+		PortalDrop(portal, false);
+		goto replan;
+	}
+
 	(void) PortalRun(portal, count, false, true, dest, dest, qc);
 
 	PortalDrop(portal, false);
@@ -574,7 +582,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 {
 	PreparedStatement *entry;
 	const char *query_string;
-	CachedPlan *cplan;
+	CachedPlan *cplan = NULL;
 	List	   *plan_list;
 	ListCell   *p;
 	ParamListInfo paramLI = NULL;
@@ -618,6 +626,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 	}
 
 	/* Replan if needed, and acquire a transient refcount */
+replan:
 	cplan = GetCachedPlan(entry->plansource, paramLI,
 						  CurrentResourceOwner, queryEnv);
 
@@ -639,8 +648,21 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 		PlannedStmt *pstmt = lfirst_node(PlannedStmt, p);
 
 		if (pstmt->commandType != CMD_UTILITY)
-			ExplainOnePlan(pstmt, into, es, query_string, paramLI, queryEnv,
-						   &planduration, (es->buffers ? &bufusage : NULL));
+		{
+			QueryDesc *queryDesc;
+
+			queryDesc = ExplainQueryDesc(pstmt, cplan, queryString,
+										 into, es, paramLI, queryEnv);
+			if (queryDesc == NULL)
+			{
+				ExplainResetOutput(es);
+				ReleaseCachedPlan(cplan, CurrentResourceOwner);
+				goto replan;
+			}
+			ExplainOnePlan(queryDesc, into, es, query_string, paramLI,
+						   queryEnv, &planduration,
+						   (es->buffers ? &bufusage : NULL));
+		}
 		else
 			ExplainOneUtility(pstmt->utilityStmt, into, es, query_string,
 							  paramLI, queryEnv);
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index a5115b9c1f..d97c9de409 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -126,11 +126,27 @@ static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);
  * get control when ExecutorStart is called.  Such a plugin would
  * normally call standard_ExecutorStart().
  *
+ * Normally, the plan tree given in queryDesc->plannedstmt is known to be
+ * valid in that *all* relations contained in plannedstmt->relationOids have
+ * already been locked.  That may not be the case however if the plannedstmt
+ * comes from a CachedPlan, one given in queryDesc->cplan.  Locks necessary
+ * to validate such a plan tree must be taken when initializing the plan tree
+ * in InitPlan(), so this sets the eflag EXEC_FLAG_GET_LOCKS.  If the
+ * CachedPlan gets invalidated as these locks are taken, InitPlan returns
+ * without setting queryDesc->planstate and sets queryDesc->plan_valid to
+ * false.  Caller must retry the execution with a freshly created CachedPlan
+ * in that case.
  * ----------------------------------------------------------------
  */
 void
 ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
+	/* Take locks if the plan tree comes from a CachedPlan. */
+	Assert(queryDesc->cplan == NULL ||
+		   CachedPlanStillValid(queryDesc->cplan));
+	if (queryDesc->cplan)
+		eflags |= EXEC_FLAG_GET_LOCKS;
+
 	/*
 	 * In some cases (e.g. an EXECUTE statement) a query execution will skip
 	 * parse analysis, which means that the query_id won't be reported.  Note
@@ -582,6 +598,16 @@ ExecCheckPermissions(List *rangeTable, List *rteperminfos,
 		RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
 
 		Assert(OidIsValid(perminfo->relid));
+
+		/*
+		 * Relations whose permissions need to be checked must already have
+		 * been locked by the parser or by AcquirePlannerLocks() if a
+		 * cached plan is being executed.
+		 * XXX shouldn't we skip calling ExecCheckPermissions from InitPlan
+		 * in a parallel worker?
+		 */
+		Assert(CheckRelLockedByMe(perminfo->relid, AccessShareLock, true) ||
+			   IsParallelWorker());
 		result = ExecCheckOneRelPerms(perminfo);
 		if (!result)
 		{
@@ -785,12 +811,43 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
 		PreventCommandIfParallelMode(CreateCommandName((Node *) plannedstmt));
 }
 
+/*
+ * Lock view relations in a given query's range table.
+ */
+static void
+ExecLockViewRelations(List *viewRelations, EState *estate, bool acquire)
+{
+	ListCell *lc;
+
+	foreach(lc, viewRelations)
+	{
+		Index	rti = lfirst_int(lc);
+		RangeTblEntry *rte = exec_rt_fetch(rti, estate);
+
+		Assert(OidIsValid(rte->relid));
+		Assert(rte->relkind == RELKIND_VIEW);
+		Assert(rte->rellockmode != NoLock);
+
+		if (acquire)
+			LockRelationOid(rte->relid, rte->rellockmode);
+		else
+			UnlockRelationOid(rte->relid, rte->rellockmode);
+	}
+}
 
 /* ----------------------------------------------------------------
  *		InitPlan
  *
  *		Initializes the query plan: open files, allocate storage
  *		and start up the rule manager
+ *
+ *		If queryDesc contains a CachedPlan, this takes locks on relations.
+ *		If any of those relations have undergone concurrent schema changes
+ *		between successfully performing RevalidateCachedQuery() on the
+ *		containing CachedPlanSource and here, locking those relations would
+ *		invalidate the CachedPlan by way of PlanCacheRelCallback().  In that
+ *		case, queryDesc->plan_valid would be set to false to tell the caller
+ *		to retry after creating a new CachedPlan.
  * ----------------------------------------------------------------
  */
 static void
@@ -807,17 +864,21 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 	int			i;
 
 	/*
-	 * Do permissions checks and save the list for later use.
+	 * initialize the node's execution state
 	 */
-	ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);
-	estate->es_rteperminfos = plannedstmt->permInfos;
+	ExecInitRangeTable(estate, rangeTable);
+
+	if (eflags & EXEC_FLAG_GET_LOCKS)
+		ExecLockViewRelations(plannedstmt->viewRelations, estate, true);
 
 	/*
-	 * initialize the node's execution state
+	 * Do permissions checks and save the list for later use.
 	 */
-	ExecInitRangeTable(estate, rangeTable);
+	ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);
+	estate->es_rteperminfos = plannedstmt->permInfos;
 
 	estate->es_plannedstmt = plannedstmt;
+	estate->es_cachedplan = queryDesc->cplan;
 	estate->es_part_prune_infos = plannedstmt->partPruneInfos;
 
 	/*
@@ -850,6 +911,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 				case ROW_MARK_KEYSHARE:
 				case ROW_MARK_REFERENCE:
 					relation = ExecGetRangeTableRelation(estate, rc->rti);
+					if (!ExecPlanStillValid(estate))
+						goto failed;
 					break;
 				case ROW_MARK_COPY:
 					/* no physical table access is required */
@@ -917,6 +980,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 			sp_eflags |= EXEC_FLAG_REWIND;
 
 		subplanstate = ExecInitNode(subplan, estate, sp_eflags);
+		if (!ExecPlanStillValid(estate))
+			goto failed;
 
 		estate->es_subplanstates = lappend(estate->es_subplanstates,
 										   subplanstate);
@@ -930,6 +995,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 	 * processing tuples.
 	 */
 	planstate = ExecInitNode(plan, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		goto failed;
 
 	/*
 	 * Get the tuple descriptor describing the type of tuples to return.
@@ -973,6 +1040,19 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 
 	queryDesc->tupDesc = tupType;
 	queryDesc->planstate = planstate;
+	queryDesc->plan_valid = true;
+	return;
+
+failed:
+	/*
+	 * Plan initialization failed.  Mark QueryDesc as such and release useless
+	 * locks.
+	 */
+	queryDesc->plan_valid = false;
+	if (eflags & EXEC_FLAG_GET_LOCKS)
+		ExecLockViewRelations(plannedstmt->viewRelations, estate, false);
+	/* Also ask ExecCloseRangeTableRelations() to release locks. */
+	estate->es_top_eflags |= EXEC_FLAG_REL_LOCKS;
 }
 
 /*
@@ -1389,7 +1469,7 @@ ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
 
 			/*
 			 * All ancestors up to the root target relation must have been
-			 * locked by the planner or AcquireExecutorLocks().
+			 * locked.
 			 */
 			ancRel = table_open(ancOid, NoLock);
 			rInfo = makeNode(ResultRelInfo);
@@ -1558,7 +1638,8 @@ ExecCloseResultRelations(EState *estate)
 /*
  * Close all relations opened by ExecGetRangeTableRelation().
  *
- * We do not release any locks we might hold on those rels.
+ * We do not release any locks we might hold on those rels, unless
+ * the caller asked otherwise.
  */
 void
 ExecCloseRangeTableRelations(EState *estate)
@@ -1567,8 +1648,12 @@ ExecCloseRangeTableRelations(EState *estate)
 
 	for (i = 0; i < estate->es_range_table_size; i++)
 	{
+		int		lockmode = NoLock;
+
+		if (estate->es_top_eflags & EXEC_FLAG_REL_LOCKS)
+			lockmode = exec_rt_fetch(i+1, estate)->rellockmode;
 		if (estate->es_relations[i])
-			table_close(estate->es_relations[i], NoLock);
+			table_close(estate->es_relations[i], lockmode);
 	}
 }
 
@@ -2797,7 +2882,8 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
 	 * Child EPQ EStates share the parent's copy of unchanging state such as
 	 * the snapshot, rangetable, and external Param info.  They need their own
 	 * copies of local state, including a tuple table, es_param_exec_vals,
-	 * result-rel info, etc.
+	 * result-rel info, etc.  Also, we don't pass the parent's copy of the
+	 * CachedPlan, because no new locks will be taken for EvalPlanQual().
 	 */
 	rcestate->es_direction = ForwardScanDirection;
 	rcestate->es_snapshot = parentestate->es_snapshot;
@@ -2883,6 +2969,7 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
 		PlanState  *subplanstate;
 
 		subplanstate = ExecInitNode(subplan, rcestate, 0);
+		Assert(ExecPlanStillValid(rcestate));
 		rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
 											 subplanstate);
 	}
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index aa3f283453..fe1d173501 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -1249,8 +1249,13 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
 	paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false);
 	paramLI = RestoreParamList(&paramspace);
 
-	/* Create a QueryDesc for the query. */
+	/*
+	 * Create a QueryDesc for the query.  Note that no CachedPlan is available
+	 * here even though the containing plan tree may have come from one in the
+	 * leader.
+	 */
 	return CreateQueryDesc(pstmt,
+						   NULL,
 						   queryString,
 						   GetActiveSnapshot(), InvalidSnapshot,
 						   receiver, paramLI, NULL, instrument_options);
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index 651ad24fc1..a1bb1ac50f 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -1813,6 +1813,8 @@ ExecInitPartitionPruning(PlanState *planstate,
 
 	/* Create the working data structure for pruning */
 	prunestate = CreatePartitionPruneState(planstate, pruneinfo);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Perform an initial partition prune pass, if required.
@@ -1939,6 +1941,8 @@ CreatePartitionPruneState(PlanState *planstate, PartitionPruneInfo *pruneinfo)
 			 * duration of this executor run.
 			 */
 			partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex);
+			if (!ExecPlanStillValid(estate))
+				return NULL;
 			partkey = RelationGetPartitionKey(partrel);
 			partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
 												partrel);
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index 4d288bc8d4..bd0c2cba92 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -388,6 +388,9 @@ ExecInitNode(Plan *node, EState *estate, int eflags)
 			break;
 	}
 
+	if (!ExecPlanStillValid(estate))
+		return NULL;
+
 	ExecSetExecProcNode(result, result->ExecProcNode);
 
 	/*
@@ -402,6 +405,8 @@ ExecInitNode(Plan *node, EState *estate, int eflags)
 
 		Assert(IsA(subplan, SubPlan));
 		sstate = ExecInitSubPlan(subplan, result);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 		subps = lappend(subps, sstate);
 	}
 	result->initPlan = subps;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index c33a3c0bec..d5bd268514 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -800,7 +800,8 @@ ExecGetRangeTableRelation(EState *estate, Index rti)
 
 		Assert(rte->rtekind == RTE_RELATION);
 
-		if (!IsParallelWorker())
+		if (!IsParallelWorker() &&
+			(estate->es_top_eflags & EXEC_FLAG_GET_LOCKS) == 0)
 		{
 			/*
 			 * In a normal query, we should already have the appropriate lock,
@@ -844,6 +845,8 @@ ExecInitResultRelation(EState *estate, ResultRelInfo *resultRelInfo,
 	Relation	resultRelationDesc;
 
 	resultRelationDesc = ExecGetRangeTableRelation(estate, rti);
+	if (!ExecPlanStillValid(estate))
+		return;
 	InitResultRelInfo(resultRelInfo,
 					  resultRelationDesc,
 					  rti,
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 50e06ec693..949bdfc837 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -843,6 +843,7 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache)
 		dest = None_Receiver;
 
 	es->qd = CreateQueryDesc(es->stmt,
+							 NULL,	/* fmgr_sql() doesn't use CachedPlans */
 							 fcache->src,
 							 GetActiveSnapshot(),
 							 InvalidSnapshot,
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 20d23696a5..f9b668dc01 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -3295,6 +3295,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 		eflags &= ~EXEC_FLAG_REWIND;
 	outerPlan = outerPlan(node);
 	outerPlanState(aggstate) = ExecInitNode(outerPlan, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * initialize source tuple type.
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index cb25499b3f..fd0ad98621 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -148,6 +148,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
 											  node->part_prune_index,
 											  node->apprelids,
 											  &validsubplans);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 		appendstate->as_prune_state = prunestate;
 		nplans = bms_num_members(validsubplans);
 
@@ -218,6 +220,8 @@ ExecInitAppend(Append *node, EState *estate, int eflags)
 			firstvalid = j;
 
 		appendplanstates[j++] = ExecInitNode(initNode, estate, eflags);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 	}
 
 	appendstate->as_first_partial_plan = firstvalid;
diff --git a/src/backend/executor/nodeBitmapAnd.c b/src/backend/executor/nodeBitmapAnd.c
index 4c5eb2b23b..98cbeb2502 100644
--- a/src/backend/executor/nodeBitmapAnd.c
+++ b/src/backend/executor/nodeBitmapAnd.c
@@ -89,6 +89,8 @@ ExecInitBitmapAnd(BitmapAnd *node, EState *estate, int eflags)
 	{
 		initNode = (Plan *) lfirst(l);
 		bitmapplanstates[i] = ExecInitNode(initNode, estate, eflags);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 		i++;
 	}
 
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index f35df0b8bf..121b1afa5d 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -763,11 +763,15 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
 	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * initialize child nodes
 	 */
 	outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * get the scan type from the relation descriptor.
diff --git a/src/backend/executor/nodeBitmapOr.c b/src/backend/executor/nodeBitmapOr.c
index 0bf8af9652..be736946f1 100644
--- a/src/backend/executor/nodeBitmapOr.c
+++ b/src/backend/executor/nodeBitmapOr.c
@@ -90,6 +90,8 @@ ExecInitBitmapOr(BitmapOr *node, EState *estate, int eflags)
 	{
 		initNode = (Plan *) lfirst(l);
 		bitmapplanstates[i] = ExecInitNode(initNode, estate, eflags);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 		i++;
 	}
 
diff --git a/src/backend/executor/nodeCustom.c b/src/backend/executor/nodeCustom.c
index bd42c65b29..91239cc500 100644
--- a/src/backend/executor/nodeCustom.c
+++ b/src/backend/executor/nodeCustom.c
@@ -61,6 +61,8 @@ ExecInitCustomScan(CustomScan *cscan, EState *estate, int eflags)
 	if (scanrelid > 0)
 	{
 		scan_rel = ExecOpenScanRelation(estate, scanrelid, eflags);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 		css->ss.ss_currentRelation = scan_rel;
 	}
 
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index c2139acca0..f130d5863d 100644
--- a/src/backend/executor/nodeForeignscan.c
+++ b/src/backend/executor/nodeForeignscan.c
@@ -173,6 +173,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags)
 	if (scanrelid > 0)
 	{
 		currentRelation = ExecOpenScanRelation(estate, scanrelid, eflags);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 		scanstate->ss.ss_currentRelation = currentRelation;
 		fdwroutine = GetFdwRoutineForRelation(currentRelation, true);
 	}
@@ -264,6 +266,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags)
 	if (outerPlan(node))
 		outerPlanState(scanstate) =
 			ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Tell the FDW to initialize the scan.
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index 307fc10eea..4a7715b8cc 100644
--- a/src/backend/executor/nodeGather.c
+++ b/src/backend/executor/nodeGather.c
@@ -89,6 +89,8 @@ ExecInitGather(Gather *node, EState *estate, int eflags)
 	 */
 	outerNode = outerPlan(node);
 	outerPlanState(gatherstate) = ExecInitNode(outerNode, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 	tupDesc = ExecGetResultType(outerPlanState(gatherstate));
 
 	/*
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 9d5e1a46e9..9e383c96ff 100644
--- a/src/backend/executor/nodeGatherMerge.c
+++ b/src/backend/executor/nodeGatherMerge.c
@@ -108,6 +108,8 @@ ExecInitGatherMerge(GatherMerge *node, EState *estate, int eflags)
 	 */
 	outerNode = outerPlan(node);
 	outerPlanState(gm_state) = ExecInitNode(outerNode, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Leader may access ExecProcNode result directly (if
diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c
index 25a1618952..87af2a92f9 100644
--- a/src/backend/executor/nodeGroup.c
+++ b/src/backend/executor/nodeGroup.c
@@ -185,6 +185,8 @@ ExecInitGroup(Group *node, EState *estate, int eflags)
 	 * initialize child nodes
 	 */
 	outerPlanState(grpstate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Initialize scan slot and type.
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index eceee99374..c8fedee777 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -379,6 +379,8 @@ ExecInitHash(Hash *node, EState *estate, int eflags)
 	 * initialize child nodes
 	 */
 	outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * initialize our result slot and type. No need to build projection
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index b215e3f59a..86420e8f17 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -659,8 +659,12 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
 	hashNode = (Hash *) innerPlan(node);
 
 	outerPlanState(hjstate) = ExecInitNode(outerNode, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 	outerDesc = ExecGetResultType(outerPlanState(hjstate));
 	innerPlanState(hjstate) = ExecInitNode((Plan *) hashNode, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 	innerDesc = ExecGetResultType(innerPlanState(hjstate));
 
 	/*
diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c
index 12bc22f33c..0456ad779f 100644
--- a/src/backend/executor/nodeIncrementalSort.c
+++ b/src/backend/executor/nodeIncrementalSort.c
@@ -1041,6 +1041,8 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
 	 * nodes may be able to do something more useful.
 	 */
 	outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Initialize scan slot and type.
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 0b43a9b969..e0aaeb5ebd 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -512,6 +512,8 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
 	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	indexstate->ss.ss_currentRelation = currentRelation;
 	indexstate->ss.ss_currentScanDesc = NULL;	/* no heap scan here */
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 4540c7781d..5090ee39e0 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -925,6 +925,8 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
 	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	indexstate->ss.ss_currentRelation = currentRelation;
 	indexstate->ss.ss_currentScanDesc = NULL;	/* no heap scan here */
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index 425fbfc405..d8789553e1 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -476,6 +476,8 @@ ExecInitLimit(Limit *node, EState *estate, int eflags)
 	 */
 	outerPlan = outerPlan(node);
 	outerPlanState(limitstate) = ExecInitNode(outerPlan, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * initialize child expressions
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index 407414fc0c..9104954bb1 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -323,6 +323,8 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
 	 * then initialize outer plan
 	 */
 	outerPlanState(lrstate) = ExecInitNode(outerPlan, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/* node returns unmodified slots from the outer plan */
 	lrstate->ps.resultopsset = true;
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 09632678b0..6ef50d3960 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -214,6 +214,8 @@ ExecInitMaterial(Material *node, EState *estate, int eflags)
 
 	outerPlan = outerPlan(node);
 	outerPlanState(matstate) = ExecInitNode(outerPlan, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Initialize result type and slot. No need to initialize projection info
diff --git a/src/backend/executor/nodeMemoize.c b/src/backend/executor/nodeMemoize.c
index 74f7d21bc8..4ecc60a238 100644
--- a/src/backend/executor/nodeMemoize.c
+++ b/src/backend/executor/nodeMemoize.c
@@ -931,6 +931,8 @@ ExecInitMemoize(Memoize *node, EState *estate, int eflags)
 
 	outerNode = outerPlan(node);
 	outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Initialize return slot and type. No need to initialize projection info
diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c
index 399b39c598..b12a02c028 100644
--- a/src/backend/executor/nodeMergeAppend.c
+++ b/src/backend/executor/nodeMergeAppend.c
@@ -96,6 +96,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
 											  node->part_prune_index,
 											  node->apprelids,
 											  &validsubplans);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 		mergestate->ms_prune_state = prunestate;
 		nplans = bms_num_members(validsubplans);
 
@@ -152,6 +154,8 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
 		Plan	   *initNode = (Plan *) list_nth(node->mergeplans, i);
 
 		mergeplanstates[j++] = ExecInitNode(initNode, estate, eflags);
+		if (!ExecPlanStillValid(estate))
+			return NULL;
 	}
 
 	mergestate->ps.ps_ProjInfo = NULL;
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 809aa215c6..0157a7ff3c 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -1482,11 +1482,15 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags)
 	mergestate->mj_SkipMarkRestore = node->skip_mark_restore;
 
 	outerPlanState(mergestate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 	outerDesc = ExecGetResultType(outerPlanState(mergestate));
 	innerPlanState(mergestate) = ExecInitNode(innerPlan(node), estate,
 											  mergestate->mj_SkipMarkRestore ?
 											  eflags :
 											  (eflags | EXEC_FLAG_MARK));
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 	innerDesc = ExecGetResultType(innerPlanState(mergestate));
 
 	/*
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 1ac65172e4..355964d103 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -4010,6 +4010,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 							   linitial_int(node->resultRelations));
 	}
 
+	if (!ExecPlanStillValid(estate))
+		return NULL;
+
 	/* set up epqstate with dummy subplan data for the moment */
 	EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
 	mtstate->fireBSTriggers = true;
@@ -4036,6 +4039,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 		if (resultRelInfo != mtstate->rootResultRelInfo)
 		{
 			ExecInitResultRelation(estate, resultRelInfo, resultRelation);
+			if (!ExecPlanStillValid(estate))
+				return NULL;
 
 			/*
 			 * For child result relations, store the root result relation
@@ -4063,6 +4068,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 	 * Now we may initialize the subplan.
 	 */
 	outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Do additional per-result-relation initialization.
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index b3d52e69ec..e4319f5c90 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -295,11 +295,15 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags)
 	 * values.
 	 */
 	outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 	if (node->nestParams == NIL)
 		eflags |= EXEC_FLAG_REWIND;
 	else
 		eflags &= ~EXEC_FLAG_REWIND;
 	innerPlanState(nlstate) = ExecInitNode(innerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Initialize result slot, type and projection.
diff --git a/src/backend/executor/nodeProjectSet.c b/src/backend/executor/nodeProjectSet.c
index f6ff3dc44c..a168cd68f6 100644
--- a/src/backend/executor/nodeProjectSet.c
+++ b/src/backend/executor/nodeProjectSet.c
@@ -247,6 +247,8 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags)
 	 * initialize child nodes
 	 */
 	outerPlanState(state) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * we don't use inner plan
diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c
index e781003934..3dae9b1497 100644
--- a/src/backend/executor/nodeRecursiveunion.c
+++ b/src/backend/executor/nodeRecursiveunion.c
@@ -244,7 +244,11 @@ ExecInitRecursiveUnion(RecursiveUnion *node, EState *estate, int eflags)
 	 * initialize child nodes
 	 */
 	outerPlanState(rustate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 	innerPlanState(rustate) = ExecInitNode(innerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * If hashing, precompute fmgr lookup data for inner loop, and create the
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index 4219712d30..9da456be4a 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -208,6 +208,8 @@ ExecInitResult(Result *node, EState *estate, int eflags)
 	 * initialize child nodes
 	 */
 	outerPlanState(resstate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * we don't use inner plan
diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c
index d7e22b1dbb..22357e7a0e 100644
--- a/src/backend/executor/nodeSamplescan.c
+++ b/src/backend/executor/nodeSamplescan.c
@@ -125,6 +125,8 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags)
 		ExecOpenScanRelation(estate,
 							 node->scan.scanrelid,
 							 eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/* we won't set up the HeapScanDesc till later */
 	scanstate->ss.ss_currentScanDesc = NULL;
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 4da0f28f7b..b0b34cd14e 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -153,6 +153,8 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
 		ExecOpenScanRelation(estate,
 							 node->scan.scanrelid,
 							 eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/* and create slot with the appropriate rowtype */
 	ExecInitScanTupleSlot(estate, &scanstate->ss,
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 4bc2406b89..2c350e6c24 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -528,6 +528,8 @@ ExecInitSetOp(SetOp *node, EState *estate, int eflags)
 	if (node->strategy == SETOP_HASHED)
 		eflags &= ~EXEC_FLAG_REWIND;
 	outerPlanState(setopstate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 	outerDesc = ExecGetResultType(outerPlanState(setopstate));
 
 	/*
diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c
index c6c72c6e67..216a5afb40 100644
--- a/src/backend/executor/nodeSort.c
+++ b/src/backend/executor/nodeSort.c
@@ -263,6 +263,8 @@ ExecInitSort(Sort *node, EState *estate, int eflags)
 	eflags &= ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
 
 	outerPlanState(sortstate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Initialize scan slot and type.
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 42471bfc04..34afe14bea 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -124,6 +124,8 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
 	 * initialize subquery
 	 */
 	subquerystate->subplan = ExecInitNode(node->subplan, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Initialize scan slot and type (needed by ExecAssignScanProjectionInfo)
diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c
index 2124c55ef5..613b377c7c 100644
--- a/src/backend/executor/nodeTidrangescan.c
+++ b/src/backend/executor/nodeTidrangescan.c
@@ -386,6 +386,8 @@ ExecInitTidRangeScan(TidRangeScan *node, EState *estate, int eflags)
 	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	tidrangestate->ss.ss_currentRelation = currentRelation;
 	tidrangestate->ss.ss_currentScanDesc = NULL;	/* no table scan here */
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index fe6a964ee1..23782aad89 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -529,6 +529,8 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags)
 	 * open the scan relation
 	 */
 	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	tidstate->ss.ss_currentRelation = currentRelation;
 	tidstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index 45035d74fa..06257e9e51 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -136,6 +136,8 @@ ExecInitUnique(Unique *node, EState *estate, int eflags)
 	 * then initialize outer plan
 	 */
 	outerPlanState(uniquestate) = ExecInitNode(outerPlan(node), estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * Initialize result slot and type. Unique nodes do no projections, so
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index d61d57e9a8..d8a9f1e94e 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -2450,6 +2450,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags)
 	 */
 	outerPlan = outerPlan(node);
 	outerPlanState(winstate) = ExecInitNode(outerPlan, estate, eflags);
+	if (!ExecPlanStillValid(estate))
+		return NULL;
 
 	/*
 	 * initialize source tuple type (which is also the tuple type that we'll
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 61f03e3999..38d76c6719 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -71,7 +71,7 @@ static int	_SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
 static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
 										 Datum *Values, const char *Nulls);
 
-static int	_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
+static int	_SPI_pquery(QueryDesc *queryDesc, uint64 tcount);
 
 static void _SPI_error_callback(void *arg);
 
@@ -1623,6 +1623,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 	_SPI_current->processed = 0;
 	_SPI_current->tuptable = NULL;
 
+replan:
 	/* Create the portal */
 	if (name == NULL || name[0] == '\0')
 	{
@@ -1766,7 +1767,8 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 	}
 
 	/*
-	 * Start portal execution.
+	 * Start portal execution.  If the portal's cached plan gets invalidated
+	 * during PortalStart(), we drop the portal and loop back to replan it.
 	 */
 	PortalStart(portal, paramLI, 0, snapshot);
 
@@ -1775,6 +1777,12 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
 	/* Pop the error context stack */
 	error_context_stack = spierrcontext.previous;
 
+	if (!portal->plan_valid)
+	{
+		PortalDrop(portal, false);
+		goto replan;
+	}
+
 	/* Pop the SPI stack */
 	_SPI_end_call(true);
 
@@ -2548,6 +2556,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
 		 * Replan if needed, and increment plan refcount.  If it's a saved
 		 * plan, the refcount must be backed by the plan_owner.
 		 */
+replan:
 		cplan = GetCachedPlan(plansource, options->params,
 							  plan_owner, _SPI_current->queryEnv);
 
@@ -2657,6 +2666,7 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
 			{
 				QueryDesc  *qdesc;
 				Snapshot	snap;
+				int			eflags;
 
 				if (ActiveSnapshotSet())
 					snap = GetActiveSnapshot();
@@ -2664,14 +2674,29 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
 					snap = InvalidSnapshot;
 
 				qdesc = CreateQueryDesc(stmt,
+										cplan,
 										plansource->query_string,
 										snap, crosscheck_snapshot,
 										dest,
 										options->params,
 										_SPI_current->queryEnv,
 										0);
-				res = _SPI_pquery(qdesc, fire_triggers,
-								  canSetTag ? options->tcount : 0);
+
+				/* Select execution options */
+				if (fire_triggers)
+					eflags = 0;				/* default run-to-completion flags */
+				else
+					eflags = EXEC_FLAG_SKIP_TRIGGERS;
+				ExecutorStart(qdesc, eflags);
+				if (!qdesc->plan_valid)
+				{
+					ExecutorFinish(qdesc);
+					ExecutorEnd(qdesc);
+					FreeQueryDesc(qdesc);
+					goto replan;
+				}
+
+				res = _SPI_pquery(qdesc, canSetTag ? options->tcount : 0);
 				FreeQueryDesc(qdesc);
 			}
 			else
@@ -2846,10 +2871,9 @@ _SPI_convert_params(int nargs, Oid *argtypes,
 }
 
 static int
-_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
+_SPI_pquery(QueryDesc *queryDesc, uint64 tcount)
 {
 	int			operation = queryDesc->operation;
-	int			eflags;
 	int			res;
 
 	switch (operation)
@@ -2893,14 +2917,6 @@ _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
 		ResetUsage();
 #endif
 
-	/* Select execution options */
-	if (fire_triggers)
-		eflags = 0;				/* default run-to-completion flags */
-	else
-		eflags = EXEC_FLAG_SKIP_TRIGGERS;
-
-	ExecutorStart(queryDesc, eflags);
-
 	ExecutorRun(queryDesc, ForwardScanDirection, tcount, true);
 
 	_SPI_current->processed = queryDesc->estate->es_processed;
diff --git a/src/backend/nodes/Makefile b/src/backend/nodes/Makefile
index af12c64878..7fb0d2d202 100644
--- a/src/backend/nodes/Makefile
+++ b/src/backend/nodes/Makefile
@@ -52,6 +52,7 @@ node_headers = \
 	access/tsmapi.h \
 	commands/event_trigger.h \
 	commands/trigger.h \
+	executor/execdesc.h \
 	executor/tuptable.h \
 	foreign/fdwapi.h \
 	nodes/bitmapset.h \
diff --git a/src/backend/nodes/gen_node_support.pl b/src/backend/nodes/gen_node_support.pl
index 19ed29657c..69e60206ba 100644
--- a/src/backend/nodes/gen_node_support.pl
+++ b/src/backend/nodes/gen_node_support.pl
@@ -63,6 +63,7 @@ my @all_input_files = qw(
   access/tsmapi.h
   commands/event_trigger.h
   commands/trigger.h
+  executor/execdesc.h
   executor/tuptable.h
   foreign/fdwapi.h
   nodes/bitmapset.h
@@ -87,6 +88,7 @@ my @nodetag_only_files = qw(
   access/tsmapi.h
   commands/event_trigger.h
   commands/trigger.h
+  executor/execdesc.h
   executor/tuptable.h
   foreign/fdwapi.h
   nodes/lockoptions.h
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index db5ff6fdca..670eba3a3a 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -527,6 +527,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 	result->partPruneInfos = glob->partPruneInfos;
 	result->rtable = glob->finalrtable;
 	result->permInfos = glob->finalrteperminfos;
+	result->viewRelations = glob->viewRelations;
 	result->resultRelations = glob->resultRelations;
 	result->appendRelations = glob->appendRelations;
 	result->subplans = glob->subplans;
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 186fc8014b..454e30e0ca 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -16,6 +16,7 @@
 #include "postgres.h"
 
 #include "access/transam.h"
+#include "catalog/pg_class.h"
 #include "catalog/pg_type.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
@@ -599,6 +600,10 @@ add_rte_to_flat_rtable(PlannerGlobal *glob, List *rteperminfos,
 		(newrte->rtekind == RTE_SUBQUERY && OidIsValid(newrte->relid)))
 		glob->relationOids = lappend_oid(glob->relationOids, newrte->relid);
 
+	if (newrte->relkind == RELKIND_VIEW)
+		glob->viewRelations = lappend_int(glob->viewRelations,
+										  list_length(glob->finalrtable));
+
 	/*
 	 * Add a copy of the RTEPermissionInfo, if any, corresponding to this RTE
 	 * to the flattened global list.
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index c74bac20b1..29d13e95db 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -1834,11 +1834,10 @@ ApplyRetrieveRule(Query *parsetree,
 
 	/*
 	 * Clear fields that should not be set in a subquery RTE.  Note that we
-	 * leave the relid, rellockmode, and perminfoindex fields set, so that the
-	 * view relation can be appropriately locked before execution and its
-	 * permissions checked.
+	 * leave the relid, relkind, rellockmode, and perminfoindex fields set,
+	 * so that the view relation can be appropriately locked before execution
+	 * and its permissions checked.
 	 */
-	rte->relkind = 0;
 	rte->tablesample = NULL;
 	rte->inh = false;			/* must not be set for a subquery */
 
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index ee9b89a672..c807e9cdcc 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -27,6 +27,7 @@
 #include "storage/procarray.h"
 #include "storage/sinvaladt.h"
 #include "utils/inval.h"
+#include "utils/lsyscache.h"
 
 
 /*
@@ -364,6 +365,50 @@ CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode, bool orstronger)
 	return false;
 }
 
+/*
+ *		CheckRelLockedByMe
+ *
+ * Like CheckRelationLockedByMe(), but takes the relation's OID rather than
+ * an open Relation: returns true if the current transaction holds a lock of
+ * mode 'lockmode' on it.  If 'orstronger' is true, a stronger (numerically
+ * higher, which is a bit semantically dubious) lockmode is also OK.
+ */
+bool
+CheckRelLockedByMe(Oid relid, LOCKMODE lockmode, bool orstronger)
+{
+	Oid			dbId = get_rel_relisshared(relid) ? InvalidOid : MyDatabaseId;
+	LOCKTAG		tag;
+
+	SET_LOCKTAG_RELATION(tag, dbId, relid);
+
+	if (LockHeldByMe(&tag, lockmode))
+		return true;
+
+	if (orstronger)
+	{
+		LOCKMODE	slockmode;
+
+		for (slockmode = lockmode + 1;
+			 slockmode <= MaxLockMode;
+			 slockmode++)
+		{
+			if (LockHeldByMe(&tag, slockmode))
+			{
+#ifdef NOT_USED
+				/* Sometimes this might be useful for debugging purposes */
+				elog(WARNING, "lock mode %s substituted for %s on relation %u",
+					 GetLockmodeName(tag.locktag_lockmethodid, slockmode),
+					 GetLockmodeName(tag.locktag_lockmethodid, lockmode),
+					 relid);
+#endif
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
 /*
  *		LockHasWaitersRelation
  *
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 470b734e9e..34d3f4ff8d 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -1196,6 +1196,7 @@ exec_simple_query(const char *query_string)
 		 * Start the portal.  No parameters here.
 		 */
 		PortalStart(portal, NULL, 0, InvalidSnapshot);
+		Assert(portal->plan_valid);
 
 		/*
 		 * Select the appropriate output format: text unless we are doing a
@@ -1700,6 +1701,7 @@ exec_bind_message(StringInfo input_message)
 						"commands ignored until end of transaction block"),
 				 errdetail_abort()));
 
+replan:
 	/*
 	 * Create the portal.  Allow silent replacement of an existing portal only
 	 * if the unnamed portal is specified.
@@ -1995,6 +1997,12 @@ exec_bind_message(StringInfo input_message)
 	 */
 	PortalStart(portal, params, 0, InvalidSnapshot);
 
+	if (!portal->plan_valid)
+	{
+		PortalDrop(portal, false);
+		goto replan;
+	}
+
 	/*
 	 * Apply the result format requests to the portal.
 	 */
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 5f0248acc5..cf3a9790d6 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -19,6 +19,7 @@
 
 #include "access/xact.h"
 #include "commands/prepare.h"
+#include "executor/execdesc.h"
 #include "executor/tstoreReceiver.h"
 #include "miscadmin.h"
 #include "pg_trace.h"
@@ -35,12 +36,6 @@
 Portal		ActivePortal = NULL;
 
 
-static void ProcessQuery(PlannedStmt *plan,
-						 const char *sourceText,
-						 ParamListInfo params,
-						 QueryEnvironment *queryEnv,
-						 DestReceiver *dest,
-						 QueryCompletion *qc);
 static void FillPortalStore(Portal portal, bool isTopLevel);
 static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
 						   DestReceiver *dest);
@@ -65,6 +60,7 @@ static void DoPortalRewind(Portal portal);
  */
 QueryDesc *
 CreateQueryDesc(PlannedStmt *plannedstmt,
+				CachedPlan *cplan,
 				const char *sourceText,
 				Snapshot snapshot,
 				Snapshot crosscheck_snapshot,
@@ -75,8 +71,10 @@ CreateQueryDesc(PlannedStmt *plannedstmt,
 {
 	QueryDesc  *qd = (QueryDesc *) palloc(sizeof(QueryDesc));
 
+	qd->type = T_QueryDesc;
 	qd->operation = plannedstmt->commandType;	/* operation */
 	qd->plannedstmt = plannedstmt;	/* plan */
+	qd->cplan = cplan;				/* CachedPlan, if plan is from one */
 	qd->sourceText = sourceText;	/* query text */
 	qd->snapshot = RegisterSnapshot(snapshot);	/* snapshot */
 	/* RI check snapshot */
@@ -116,86 +114,6 @@ FreeQueryDesc(QueryDesc *qdesc)
 }
 
 
-/*
- * ProcessQuery
- *		Execute a single plannable query within a PORTAL_MULTI_QUERY,
- *		PORTAL_ONE_RETURNING, or PORTAL_ONE_MOD_WITH portal
- *
- *	plan: the plan tree for the query
- *	sourceText: the source text of the query
- *	params: any parameters needed
- *	dest: where to send results
- *	qc: where to store the command completion status data.
- *
- * qc may be NULL if caller doesn't want a status string.
- *
- * Must be called in a memory context that will be reset or deleted on
- * error; otherwise the executor's memory usage will be leaked.
- */
-static void
-ProcessQuery(PlannedStmt *plan,
-			 const char *sourceText,
-			 ParamListInfo params,
-			 QueryEnvironment *queryEnv,
-			 DestReceiver *dest,
-			 QueryCompletion *qc)
-{
-	QueryDesc  *queryDesc;
-
-	/*
-	 * Create the QueryDesc object
-	 */
-	queryDesc = CreateQueryDesc(plan, sourceText,
-								GetActiveSnapshot(), InvalidSnapshot,
-								dest, params, queryEnv, 0);
-
-	/*
-	 * Call ExecutorStart to prepare the plan for execution
-	 */
-	ExecutorStart(queryDesc, 0);
-
-	/*
-	 * Run the plan to completion.
-	 */
-	ExecutorRun(queryDesc, ForwardScanDirection, 0L, true);
-
-	/*
-	 * Build command completion status data, if caller wants one.
-	 */
-	if (qc)
-	{
-		switch (queryDesc->operation)
-		{
-			case CMD_SELECT:
-				SetQueryCompletion(qc, CMDTAG_SELECT, queryDesc->estate->es_processed);
-				break;
-			case CMD_INSERT:
-				SetQueryCompletion(qc, CMDTAG_INSERT, queryDesc->estate->es_processed);
-				break;
-			case CMD_UPDATE:
-				SetQueryCompletion(qc, CMDTAG_UPDATE, queryDesc->estate->es_processed);
-				break;
-			case CMD_DELETE:
-				SetQueryCompletion(qc, CMDTAG_DELETE, queryDesc->estate->es_processed);
-				break;
-			case CMD_MERGE:
-				SetQueryCompletion(qc, CMDTAG_MERGE, queryDesc->estate->es_processed);
-				break;
-			default:
-				SetQueryCompletion(qc, CMDTAG_UNKNOWN, queryDesc->estate->es_processed);
-				break;
-		}
-	}
-
-	/*
-	 * Now, we close down all the scans and free allocated resources.
-	 */
-	ExecutorFinish(queryDesc);
-	ExecutorEnd(queryDesc);
-
-	FreeQueryDesc(queryDesc);
-}
-
 /*
  * ChoosePortalStrategy
  *		Select portal execution strategy given the intended statement list.
@@ -427,7 +345,8 @@ FetchStatementTargetList(Node *stmt)
  * to be used for cursors).
  *
  * On return, portal is ready to accept PortalRun() calls, and the result
- * tupdesc (if any) is known.
+ * tupdesc (if any) is known, unless portal->plan_valid is set to false, in
+ * which case the caller must retry after generating a new CachedPlan.
  */
 void
 PortalStart(Portal portal, ParamListInfo params,
@@ -435,7 +354,6 @@ PortalStart(Portal portal, ParamListInfo params,
 {
 	Portal		saveActivePortal;
 	ResourceOwner saveResourceOwner;
-	MemoryContext savePortalContext;
 	MemoryContext oldContext;
 	QueryDesc  *queryDesc;
 	int			myeflags;
@@ -448,15 +366,13 @@ PortalStart(Portal portal, ParamListInfo params,
 	 */
 	saveActivePortal = ActivePortal;
 	saveResourceOwner = CurrentResourceOwner;
-	savePortalContext = PortalContext;
 	PG_TRY();
 	{
 		ActivePortal = portal;
 		if (portal->resowner)
 			CurrentResourceOwner = portal->resowner;
-		PortalContext = portal->portalContext;
 
-		oldContext = MemoryContextSwitchTo(PortalContext);
+		oldContext = MemoryContextSwitchTo(portal->queryContext);
 
 		/* Must remember portal param list, if any */
 		portal->portalParams = params;
@@ -472,6 +388,8 @@ PortalStart(Portal portal, ParamListInfo params,
 		switch (portal->strategy)
 		{
 			case PORTAL_ONE_SELECT:
+			case PORTAL_ONE_RETURNING:
+			case PORTAL_ONE_MOD_WITH:
 
 				/* Must set snapshot before starting executor. */
 				if (snapshot)
@@ -493,6 +411,7 @@ PortalStart(Portal portal, ParamListInfo params,
 				 * the destination to DestNone.
 				 */
 				queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts),
+											portal->cplan,
 											portal->sourceText,
 											GetActiveSnapshot(),
 											InvalidSnapshot,
@@ -501,30 +420,50 @@ PortalStart(Portal portal, ParamListInfo params,
 											portal->queryEnv,
 											0);
 
+				/* Remember for PortalRunMulti() */
+				portal->qdescs = lappend(portal->qdescs, queryDesc);
+
 				/*
 				 * If it's a scrollable cursor, executor needs to support
 				 * REWIND and backwards scan, as well as whatever the caller
 				 * might've asked for.
 				 */
-				if (portal->cursorOptions & CURSOR_OPT_SCROLL)
+				if (portal->strategy == PORTAL_ONE_SELECT &&
+					(portal->cursorOptions & CURSOR_OPT_SCROLL))
 					myeflags = eflags | EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD;
 				else
 					myeflags = eflags;
 
 				/*
-				 * Call ExecutorStart to prepare the plan for execution
+				 * Call ExecutorStart to prepare the plan for execution.  A
+				 * cached plan may get invalidated as we're doing that.
 				 */
 				ExecutorStart(queryDesc, myeflags);
+				if (!queryDesc->plan_valid)
+				{
+					Assert(queryDesc->cplan);
+					PortalQueryFinish(queryDesc);
+					PopActiveSnapshot();
+					portal->plan_valid = false;
+					goto early_exit;
+				}
 
 				/*
-				 * This tells PortalCleanup to shut down the executor
+				 * This tells PortalCleanup to shut down the executor, though
+				 * not needed for queries handled by PortalRunMulti().
 				 */
-				portal->queryDesc = queryDesc;
+				if (portal->strategy == PORTAL_ONE_SELECT)
+					portal->queryDesc = queryDesc;
 
 				/*
-				 * Remember tuple descriptor (computed by ExecutorStart)
+				 * Remember tuple descriptor (computed by ExecutorStart),
+				 * though make it independent of QueryDesc for queries handled
+				 * by PortalRunMulti().
 				 */
-				portal->tupDesc = queryDesc->tupDesc;
+				if (portal->strategy != PORTAL_ONE_SELECT)
+					portal->tupDesc = CreateTupleDescCopy(queryDesc->tupDesc);
+				else
+					portal->tupDesc = queryDesc->tupDesc;
 
 				/*
 				 * Reset cursor position data to "start of query"
@@ -532,33 +471,11 @@ PortalStart(Portal portal, ParamListInfo params,
 				portal->atStart = true;
 				portal->atEnd = false;	/* allow fetches */
 				portal->portalPos = 0;
+				portal->plan_valid = true;
 
 				PopActiveSnapshot();
 				break;
 
-			case PORTAL_ONE_RETURNING:
-			case PORTAL_ONE_MOD_WITH:
-
-				/*
-				 * We don't start the executor until we are told to run the
-				 * portal.  We do need to set up the result tupdesc.
-				 */
-				{
-					PlannedStmt *pstmt;
-
-					pstmt = PortalGetPrimaryStmt(portal);
-					portal->tupDesc =
-						ExecCleanTypeFromTL(pstmt->planTree->targetlist);
-				}
-
-				/*
-				 * Reset cursor position data to "start of query"
-				 */
-				portal->atStart = true;
-				portal->atEnd = false;	/* allow fetches */
-				portal->portalPos = 0;
-				break;
-
 			case PORTAL_UTIL_SELECT:
 
 				/*
@@ -578,11 +495,69 @@ PortalStart(Portal portal, ParamListInfo params,
 				portal->atStart = true;
 				portal->atEnd = false;	/* allow fetches */
 				portal->portalPos = 0;
+				portal->plan_valid = true;
 				break;
 
 			case PORTAL_MULTI_QUERY:
-				/* Need do nothing now */
+				{
+					ListCell   *lc;
+					bool		pushed_active_snapshot = false;
+
+					foreach(lc, portal->stmts)
+					{
+						PlannedStmt *plan = lfirst_node(PlannedStmt, lc);
+						bool		is_utility = (plan->utilityStmt != NULL);
+
+						/* Must set snapshot before starting executor. */
+						if (!pushed_active_snapshot && !is_utility)
+						{
+							PushActiveSnapshot(GetTransactionSnapshot());
+							pushed_active_snapshot = true;
+						}
+
+						/*
+						 * Create the QueryDesc object.  DestReceiver will
+						 * be set in PortalRunMulti().
+						 */
+						queryDesc = CreateQueryDesc(plan, portal->cplan,
+													portal->sourceText,
+													pushed_active_snapshot ?
+													GetActiveSnapshot() :
+													InvalidSnapshot,
+													InvalidSnapshot,
+													NULL,
+													params,
+													portal->queryEnv, 0);
+
+						/* Remember for PortalRunMulti() */
+						portal->qdescs = lappend(portal->qdescs, queryDesc);
+
+						/*
+						 * Call ExecutorStart to prepare the plan for
+						 * execution.  A cached plan may get invalidated as
+						 * we're doing that.
+						 */
+						if (!is_utility)
+						{
+							ExecutorStart(queryDesc, 0);
+							if (!queryDesc->plan_valid)
+							{
+								Assert(queryDesc->cplan);
+								PortalQueryFinish(queryDesc);
+								if (pushed_active_snapshot)
+									PopActiveSnapshot();
+								portal->plan_valid = false;
+								goto early_exit;
+							}
+						}
+					}
+
+					if (pushed_active_snapshot)
+						PopActiveSnapshot();
+				}
+
 				portal->tupDesc = NULL;
+				portal->plan_valid = true;
 				break;
 		}
 	}
@@ -594,19 +569,18 @@ PortalStart(Portal portal, ParamListInfo params,
 		/* Restore global vars and propagate error */
 		ActivePortal = saveActivePortal;
 		CurrentResourceOwner = saveResourceOwner;
-		PortalContext = savePortalContext;
 
 		PG_RE_THROW();
 	}
 	PG_END_TRY();
 
+	portal->status = PORTAL_READY;
+
+early_exit:
 	MemoryContextSwitchTo(oldContext);
 
 	ActivePortal = saveActivePortal;
 	CurrentResourceOwner = saveResourceOwner;
-	PortalContext = savePortalContext;
-
-	portal->status = PORTAL_READY;
 }
 
 /*
@@ -1193,7 +1167,7 @@ PortalRunMulti(Portal portal,
 			   QueryCompletion *qc)
 {
 	bool		active_snapshot_set = false;
-	ListCell   *stmtlist_item;
+	ListCell   *qdesc_item;
 
 	/*
 	 * If the destination is DestRemoteExecute, change to DestNone.  The
@@ -1214,9 +1188,10 @@ PortalRunMulti(Portal portal,
 	 * Loop to handle the individual queries generated from a single parsetree
 	 * by analysis and rewrite.
 	 */
-	foreach(stmtlist_item, portal->stmts)
+	foreach(qdesc_item, portal->qdescs)
 	{
-		PlannedStmt *pstmt = lfirst_node(PlannedStmt, stmtlist_item);
+		QueryDesc *qdesc = lfirst_node(QueryDesc, qdesc_item);
+		PlannedStmt *pstmt = qdesc->plannedstmt;
 
 		/*
 		 * If we got a cancel signal in prior command, quit
@@ -1241,7 +1216,7 @@ PortalRunMulti(Portal portal,
 			 */
 			if (!active_snapshot_set)
 			{
-				Snapshot	snapshot = GetTransactionSnapshot();
+				Snapshot    snapshot = qdesc->snapshot;
 
 				/* If told to, register the snapshot and save in portal */
 				if (setHoldSnapshot)
@@ -1271,23 +1246,38 @@ PortalRunMulti(Portal portal,
 			else
 				UpdateActiveSnapshotCommandId();
 
+			/*
+			 * Run the plan to completion.
+			 */
+			qdesc->dest = dest;
+			ExecutorRun(qdesc, ForwardScanDirection, 0L, true);
+
+			/*
+			 * Build command completion status data if needed.
+			 */
 			if (pstmt->canSetTag)
 			{
-				/* statement can set tag string */
-				ProcessQuery(pstmt,
-							 portal->sourceText,
-							 portal->portalParams,
-							 portal->queryEnv,
-							 dest, qc);
-			}
-			else
-			{
-				/* stmt added by rewrite cannot set tag */
-				ProcessQuery(pstmt,
-							 portal->sourceText,
-							 portal->portalParams,
-							 portal->queryEnv,
-							 altdest, NULL);
+				switch (qdesc->operation)
+				{
+					case CMD_SELECT:
+						SetQueryCompletion(qc, CMDTAG_SELECT, qdesc->estate->es_processed);
+						break;
+					case CMD_INSERT:
+						SetQueryCompletion(qc, CMDTAG_INSERT, qdesc->estate->es_processed);
+						break;
+					case CMD_UPDATE:
+						SetQueryCompletion(qc, CMDTAG_UPDATE, qdesc->estate->es_processed);
+						break;
+					case CMD_DELETE:
+						SetQueryCompletion(qc, CMDTAG_DELETE, qdesc->estate->es_processed);
+						break;
+					case CMD_MERGE:
+						SetQueryCompletion(qc, CMDTAG_MERGE, qdesc->estate->es_processed);
+						break;
+					default:
+						SetQueryCompletion(qc, CMDTAG_UNKNOWN, qdesc->estate->es_processed);
+						break;
+				}
 			}
 
 			if (log_executor_stats)
@@ -1346,8 +1336,19 @@ PortalRunMulti(Portal portal,
 		 * Increment command counter between queries, but not after the last
 		 * one.
 		 */
-		if (lnext(portal->stmts, stmtlist_item) != NULL)
+		if (lnext(portal->qdescs, qdesc_item) != NULL)
 			CommandCounterIncrement();
+
+		/* portal->queryDesc is freed by PortalCleanup(). */
+		if (qdesc != portal->queryDesc)
+		{
+			if (qdesc->estate)
+			{
+				ExecutorFinish(qdesc);
+				ExecutorEnd(qdesc);
+			}
+			FreeQueryDesc(qdesc);
+		}
 	}
 
 	/* Pop the snapshot if we pushed one. */
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index c07382051d..38ae43e24b 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -2073,6 +2073,27 @@ get_rel_persistence(Oid relid)
 	return result;
 }
 
+/*
+ * get_rel_relisshared
+ *
+ *		Returns whether the given relation is shared
+ */
+bool
+get_rel_relisshared(Oid relid)
+{
+	HeapTuple	tp;
+	Form_pg_class reltup;
+	bool		result;
+
+	tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+	if (!HeapTupleIsValid(tp))
+		elog(ERROR, "cache lookup failed for relation %u", relid);
+	reltup = (Form_pg_class) GETSTRUCT(tp);
+	result = reltup->relisshared;
+	ReleaseSysCache(tp);
+
+	return result;
+}
 
 /*				---------- TRANSFORM CACHE ----------						 */
 
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 77c2ba3f8f..4e455d815f 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -100,13 +100,13 @@ static void ReleaseGenericPlan(CachedPlanSource *plansource);
 static List *RevalidateCachedQuery(CachedPlanSource *plansource,
 								   QueryEnvironment *queryEnv);
 static bool CheckCachedPlan(CachedPlanSource *plansource);
+static bool GenericPlanIsValid(CachedPlan *cplan);
 static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
 								   ParamListInfo boundParams, QueryEnvironment *queryEnv);
 static bool choose_custom_plan(CachedPlanSource *plansource,
 							   ParamListInfo boundParams);
 static double cached_plan_cost(CachedPlan *plan, bool include_planner);
 static Query *QueryListGetPrimaryStmt(List *stmts);
-static void AcquireExecutorLocks(List *stmt_list, bool acquire);
 static void AcquirePlannerLocks(List *stmt_list, bool acquire);
 static void ScanQueryForLocks(Query *parsetree, bool acquire);
 static bool ScanQueryWalker(Node *node, bool *acquire);
@@ -787,9 +787,6 @@ RevalidateCachedQuery(CachedPlanSource *plansource,
  *
  * Caller must have already called RevalidateCachedQuery to verify that the
  * querytree is up to date.
- *
- * On a "true" return, we have acquired the locks needed to run the plan.
- * (We must do this for the "true" result to be race-condition-free.)
  */
 static bool
 CheckCachedPlan(CachedPlanSource *plansource)
@@ -803,60 +800,56 @@ CheckCachedPlan(CachedPlanSource *plansource)
 	if (!plan)
 		return false;
 
-	Assert(plan->magic == CACHEDPLAN_MAGIC);
-	/* Generic plans are never one-shot */
-	Assert(!plan->is_oneshot);
+	if (GenericPlanIsValid(plan))
+		return true;
 
 	/*
-	 * If plan isn't valid for current role, we can't use it.
+	 * Plan has been invalidated, so unlink it from the parent and release it.
 	 */
-	if (plan->is_valid && plan->dependsOnRole &&
-		plan->planRoleId != GetUserId())
-		plan->is_valid = false;
+	ReleaseGenericPlan(plansource);
 
-	/*
-	 * If it appears valid, acquire locks and recheck; this is much the same
-	 * logic as in RevalidateCachedQuery, but for a plan.
-	 */
-	if (plan->is_valid)
+	return false;
+}
+
+/*
+ * GenericPlanIsValid
+ *		Is a generic plan still valid?
+ *
+ * It may have gone stale due to concurrent schema modifications of relations
+ * mentioned in the plan, a role change, or advancement of TransactionXmin.
+ */
+static bool
+GenericPlanIsValid(CachedPlan *cplan)
+{
+	Assert(cplan != NULL);
+	Assert(cplan->magic == CACHEDPLAN_MAGIC);
+	/* Generic plans are never one-shot */
+	Assert(!cplan->is_oneshot);
+
+	if (cplan->is_valid)
 	{
 		/*
 		 * Plan must have positive refcount because it is referenced by
 		 * plansource; so no need to fear it disappears under us here.
 		 */
-		Assert(plan->refcount > 0);
-
-		AcquireExecutorLocks(plan->stmt_list, true);
+		Assert(cplan->refcount > 0);
 
 		/*
-		 * If plan was transient, check to see if TransactionXmin has
-		 * advanced, and if so invalidate it.
+		 * If plan isn't valid for current role, we can't use it.
 		 */
-		if (plan->is_valid &&
-			TransactionIdIsValid(plan->saved_xmin) &&
-			!TransactionIdEquals(plan->saved_xmin, TransactionXmin))
-			plan->is_valid = false;
+		if (cplan->dependsOnRole && cplan->planRoleId != GetUserId())
+			cplan->is_valid = false;
 
 		/*
-		 * By now, if any invalidation has happened, the inval callback
-		 * functions will have marked the plan invalid.
+		 * If plan was transient, check to see if TransactionXmin has
+		 * advanced, and if so invalidate it.
 		 */
-		if (plan->is_valid)
-		{
-			/* Successfully revalidated and locked the query. */
-			return true;
-		}
-
-		/* Oops, the race case happened.  Release useless locks. */
-		AcquireExecutorLocks(plan->stmt_list, false);
+		if (TransactionIdIsValid(cplan->saved_xmin) &&
+			!TransactionIdEquals(cplan->saved_xmin, TransactionXmin))
+			cplan->is_valid = false;
 	}
 
-	/*
-	 * Plan has been invalidated, so unlink it from the parent and release it.
-	 */
-	ReleaseGenericPlan(plansource);
-
-	return false;
+	return cplan->is_valid;
 }
 
 /*
@@ -1126,9 +1119,6 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
  * plan or a custom plan for the given parameters: the caller does not know
  * which it will get.
  *
- * On return, the plan is valid and we have sufficient locks to begin
- * execution.
- *
  * On return, the refcount of the plan has been incremented; a later
  * ReleaseCachedPlan() call is expected.  If "owner" is not NULL then
  * the refcount has been reported to that ResourceOwner (note that this
@@ -1360,8 +1350,8 @@ CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource,
 	}
 
 	/*
-	 * Reject if AcquireExecutorLocks would have anything to do.  This is
-	 * probably unnecessary given the previous check, but let's be safe.
+	 * Reject if the executor would need to take additional locks, that is, in
+	 * addition to those taken by AcquirePlannerLocks() on a given query.
 	 */
 	foreach(lc, plan->stmt_list)
 	{
@@ -1735,58 +1725,6 @@ QueryListGetPrimaryStmt(List *stmts)
 	return NULL;
 }
 
-/*
- * AcquireExecutorLocks: acquire locks needed for execution of a cached plan;
- * or release them if acquire is false.
- */
-static void
-AcquireExecutorLocks(List *stmt_list, bool acquire)
-{
-	ListCell   *lc1;
-
-	foreach(lc1, stmt_list)
-	{
-		PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc1);
-		ListCell   *lc2;
-
-		if (plannedstmt->commandType == CMD_UTILITY)
-		{
-			/*
-			 * Ignore utility statements, except those (such as EXPLAIN) that
-			 * contain a parsed-but-not-planned query.  Note: it's okay to use
-			 * ScanQueryForLocks, even though the query hasn't been through
-			 * rule rewriting, because rewriting doesn't change the query
-			 * representation.
-			 */
-			Query	   *query = UtilityContainsQuery(plannedstmt->utilityStmt);
-
-			if (query)
-				ScanQueryForLocks(query, acquire);
-			continue;
-		}
-
-		foreach(lc2, plannedstmt->rtable)
-		{
-			RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2);
-
-			if (!(rte->rtekind == RTE_RELATION ||
-				  (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid))))
-				continue;
-
-			/*
-			 * Acquire the appropriate type of lock on each relation OID. Note
-			 * that we don't actually try to open the rel, and hence will not
-			 * fail if it's been dropped entirely --- we'll just transiently
-			 * acquire a non-conflicting lock.
-			 */
-			if (acquire)
-				LockRelationOid(rte->relid, rte->rellockmode);
-			else
-				UnlockRelationOid(rte->relid, rte->rellockmode);
-		}
-	}
-}
-
 /*
  * AcquirePlannerLocks: acquire locks needed for planning of a querytree list;
  * or release them if acquire is false.
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 06dfa85f04..3ad80c7ecb 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -201,6 +201,10 @@ CreatePortal(const char *name, bool allowDup, bool dupSilent)
 	portal->portalContext = AllocSetContextCreate(TopPortalContext,
 												  "PortalContext",
 												  ALLOCSET_SMALL_SIZES);
+	/* initialize portal's query context to store QueryDescs */
+	portal->queryContext = AllocSetContextCreate(TopPortalContext,
+												 "PortalQueryContext",
+												 ALLOCSET_SMALL_SIZES);
 
 	/* create a resource owner for the portal */
 	portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
@@ -224,6 +228,7 @@ CreatePortal(const char *name, bool allowDup, bool dupSilent)
 
 	/* for named portals reuse portal->name copy */
 	MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");
+	MemoryContextSetIdentifier(portal->queryContext, portal->name[0] ? portal->name : "<unnamed>");
 
 	return portal;
 }
@@ -594,6 +599,7 @@ PortalDrop(Portal portal, bool isTopCommit)
 
 	/* release subsidiary storage */
 	MemoryContextDelete(portal->portalContext);
+	MemoryContextDelete(portal->queryContext);
 
 	/* release portal struct (it's in TopPortalContext) */
 	pfree(portal);
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h
index 7c1071ddd1..da39b2e4ff 100644
--- a/src/include/commands/explain.h
+++ b/src/include/commands/explain.h
@@ -87,7 +87,11 @@ extern void ExplainOneUtility(Node *utilityStmt, IntoClause *into,
 							  ExplainState *es, const char *queryString,
 							  ParamListInfo params, QueryEnvironment *queryEnv);
 
-extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into,
+extern QueryDesc *ExplainQueryDesc(PlannedStmt *stmt, struct CachedPlan *cplan,
+				 const char *queryString, IntoClause *into, ExplainState *es,
+				 ParamListInfo params, QueryEnvironment *queryEnv);
+extern void ExplainOnePlan(QueryDesc *queryDesc,
+						   IntoClause *into,
 						   ExplainState *es, const char *queryString,
 						   ParamListInfo params, QueryEnvironment *queryEnv,
 						   const instr_time *planduration,
@@ -103,6 +107,7 @@ extern void ExplainQueryParameters(ExplainState *es, ParamListInfo params, int m
 
 extern void ExplainBeginOutput(ExplainState *es);
 extern void ExplainEndOutput(ExplainState *es);
+extern void ExplainResetOutput(ExplainState *es);
 extern void ExplainSeparatePlans(ExplainState *es);
 
 extern void ExplainPropertyList(const char *qlabel, List *data,
diff --git a/src/include/executor/execdesc.h b/src/include/executor/execdesc.h
index af2bf36dfb..c36c25b497 100644
--- a/src/include/executor/execdesc.h
+++ b/src/include/executor/execdesc.h
@@ -32,9 +32,12 @@
  */
 typedef struct QueryDesc
 {
+	NodeTag		type;
+
 	/* These fields are provided by CreateQueryDesc */
 	CmdType		operation;		/* CMD_SELECT, CMD_UPDATE, etc. */
 	PlannedStmt *plannedstmt;	/* planner's output (could be utility, too) */
+	struct CachedPlan *cplan;	/* CachedPlan, if plannedstmt is from one */
 	const char *sourceText;		/* source text of the query */
 	Snapshot	snapshot;		/* snapshot to use for query */
 	Snapshot	crosscheck_snapshot;	/* crosscheck for RI update/delete */
@@ -47,6 +50,7 @@ typedef struct QueryDesc
 	TupleDesc	tupDesc;		/* descriptor for result tuples */
 	EState	   *estate;			/* executor's query-wide state */
 	PlanState  *planstate;		/* tree of per-plan-node state */
+	bool		plan_valid;		/* is planstate tree fully valid? */
 
 	/* This field is set by ExecutorRun */
 	bool		already_executed;	/* true if previously executed */
@@ -57,6 +61,7 @@ typedef struct QueryDesc
 
 /* in pquery.c */
 extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt,
+								  struct CachedPlan *cplan,
 								  const char *sourceText,
 								  Snapshot snapshot,
 								  Snapshot crosscheck_snapshot,
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index e7e25c057e..8c680358e8 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -19,6 +19,7 @@
 #include "nodes/lockoptions.h"
 #include "nodes/parsenodes.h"
 #include "utils/memutils.h"
+#include "utils/plancache.h"
 
 
 /*
@@ -59,6 +60,10 @@
 #define EXEC_FLAG_MARK			0x0008	/* need mark/restore */
 #define EXEC_FLAG_SKIP_TRIGGERS 0x0010	/* skip AfterTrigger calls */
 #define EXEC_FLAG_WITH_NO_DATA	0x0020	/* rel scannability doesn't matter */
+#define EXEC_FLAG_GET_LOCKS		0x0400	/* should ExecGetRangeTableRelation
+										 * lock relations? */
+#define EXEC_FLAG_REL_LOCKS		0x8000	/* should ExecCloseRangeTableRelations
+										 * release locks? */
 
 
 /* Hook for plugins to get control in ExecutorStart() */
@@ -245,6 +250,13 @@ extern void ExecEndNode(PlanState *node);
 extern void ExecShutdownNode(PlanState *node);
 extern void ExecSetTupleBound(int64 tuples_needed, PlanState *child_node);
 
+/* Is the CachedPlan (if any) behind this EState's plan tree still valid? */
+static inline bool
+ExecPlanStillValid(EState *estate)
+{
+	return estate->es_cachedplan == NULL ? true :
+		CachedPlanStillValid(estate->es_cachedplan);
+}
 
 /* ----------------------------------------------------------------
  *		ExecProcNode
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 20f4c8b35f..89f5a627c8 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -619,6 +619,8 @@ typedef struct EState
 										 * ExecRowMarks, or NULL if none */
 	List	   *es_rteperminfos;	/* List of RTEPermissionInfo */
 	PlannedStmt *es_plannedstmt;	/* link to top of plan tree */
+	struct CachedPlan *es_cachedplan;	/* CachedPlan if plannedstmt is from
+										 * one */
 	List	   *es_part_prune_infos;	/* PlannedStmt.partPruneInfos */
 	const char *es_sourceText;	/* Source text from QueryDesc */
 
diff --git a/src/include/nodes/meson.build b/src/include/nodes/meson.build
index efe0834afb..a8fdd9e176 100644
--- a/src/include/nodes/meson.build
+++ b/src/include/nodes/meson.build
@@ -13,6 +13,7 @@ node_support_input_i = [
   'access/tsmapi.h',
   'commands/event_trigger.h',
   'commands/trigger.h',
+  'executor/execdesc.h',
   'executor/tuptable.h',
   'foreign/fdwapi.h',
   'nodes/bitmapset.h',
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 0d4b1ec4e4..71004fee75 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -116,6 +116,9 @@ typedef struct PlannerGlobal
 	/* "flat" list of RTEPermissionInfos */
 	List	   *finalrteperminfos;
 
+	/* "flat" list of integer RT indexes */
+	List	   *viewRelations;
+
 	/* "flat" list of PlanRowMarks */
 	List	   *finalrowmarks;
 
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 4781a9c632..da9e73fb16 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -78,6 +78,9 @@ typedef struct PlannedStmt
 	List	   *permInfos;		/* list of RTEPermissionInfo nodes for rtable
 								 * entries needing one */
 
+	List	   *viewRelations;	/* integer list of RT indexes, or NIL if no
+								 * views are queried */
+
 	/* rtable indexes of target relations for INSERT/UPDATE/DELETE/MERGE */
 	List	   *resultRelations;	/* integer list of RT indexes, or NIL */
 
diff --git a/src/include/storage/lmgr.h b/src/include/storage/lmgr.h
index 4ee91e3cf9..598bf2688a 100644
--- a/src/include/storage/lmgr.h
+++ b/src/include/storage/lmgr.h
@@ -48,6 +48,7 @@ extern bool ConditionalLockRelation(Relation relation, LOCKMODE lockmode);
 extern void UnlockRelation(Relation relation, LOCKMODE lockmode);
 extern bool CheckRelationLockedByMe(Relation relation, LOCKMODE lockmode,
 									bool orstronger);
+extern bool CheckRelLockedByMe(Oid relid, LOCKMODE lockmode, bool orstronger);
 extern bool LockHasWaitersRelation(Relation relation, LOCKMODE lockmode);
 
 extern void LockRelationIdForSession(LockRelId *relid, LOCKMODE lockmode);
diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h
index 4f5418b972..3074e604dd 100644
--- a/src/include/utils/lsyscache.h
+++ b/src/include/utils/lsyscache.h
@@ -139,6 +139,7 @@ extern char get_rel_relkind(Oid relid);
 extern bool get_rel_relispartition(Oid relid);
 extern Oid	get_rel_tablespace(Oid relid);
 extern char get_rel_persistence(Oid relid);
+extern bool get_rel_relisshared(Oid relid);
 extern Oid	get_transform_fromsql(Oid typid, Oid langid, List *trftypes);
 extern Oid	get_transform_tosql(Oid typid, Oid langid, List *trftypes);
 extern bool get_typisdefined(Oid typid);
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index a443181d41..c2e485ac2c 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -221,6 +221,21 @@ extern CachedPlan *GetCachedPlan(CachedPlanSource *plansource,
 								 ParamListInfo boundParams,
 								 ResourceOwner owner,
 								 QueryEnvironment *queryEnv);
+
+/*
+ * CachedPlanStillValid
+ *		Returns whether a cached generic plan is still valid
+ *
+ * Called by the executor after it has finished taking locks on a plan tree
+ * in a CachedPlan.
+ */
+static inline bool
+CachedPlanStillValid(CachedPlan *cplan)
+{
+	return cplan->is_valid;
+}
+
+extern bool CachedPlanStillValid(CachedPlan *cplan);
 extern void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner);
 
 extern bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource,
diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h
index aa08b1e0fc..332a08ccb4 100644
--- a/src/include/utils/portal.h
+++ b/src/include/utils/portal.h
@@ -138,6 +138,9 @@ typedef struct PortalData
 	QueryCompletion qc;			/* command completion data for executed query */
 	List	   *stmts;			/* list of PlannedStmts */
 	CachedPlan *cplan;			/* CachedPlan, if stmts are from one */
+	List	   *qdescs;			/* list of QueryDescs */
+	bool		plan_valid;		/* are plan(s) ready for execution? */
+	MemoryContext queryContext;	/* memory for QueryDescs and children */
 
 	ParamListInfo portalParams; /* params to pass to query */
 	QueryEnvironment *queryEnv; /* environment for query */
@@ -242,6 +245,7 @@ extern void PortalDefineQuery(Portal portal,
 							  CommandTag commandTag,
 							  List *stmts,
 							  CachedPlan *cplan);
+extern void PortalQueryFinish(QueryDesc *queryDesc);
 extern PlannedStmt *PortalGetPrimaryStmt(Portal portal);
 extern void PortalCreateHoldStore(Portal portal);
 extern void PortalHashTableDeleteAll(void);
diff --git a/src/test/modules/delay_execution/Makefile b/src/test/modules/delay_execution/Makefile
index 70f24e846d..2fca84d027 100644
--- a/src/test/modules/delay_execution/Makefile
+++ b/src/test/modules/delay_execution/Makefile
@@ -8,7 +8,8 @@ OBJS = \
 	delay_execution.o
 
 ISOLATION = partition-addition \
-	    partition-removal-1
+	    partition-removal-1 \
+		cached-plan-replan
 
 ifdef USE_PGXS
 PG_CONFIG = pg_config
diff --git a/src/test/modules/delay_execution/delay_execution.c b/src/test/modules/delay_execution/delay_execution.c
index 7cd76eb34b..5d7a3e9858 100644
--- a/src/test/modules/delay_execution/delay_execution.c
+++ b/src/test/modules/delay_execution/delay_execution.c
@@ -1,14 +1,18 @@
 /*-------------------------------------------------------------------------
  *
  * delay_execution.c
- *		Test module to allow delay between parsing and execution of a query.
+ *		Test module to introduce delay at various points during execution of a
+ *		query to test that execution proceeds safely in light of concurrent
+ *		changes.
  *
  * The delay is implemented by taking and immediately releasing a specified
  * advisory lock.  If another process has previously taken that lock, the
  * current process will be blocked until the lock is released; otherwise,
  * there's no effect.  This allows an isolationtester script to reliably
- * test behaviors where some specified action happens in another backend
- * between parsing and execution of any desired query.
+ * test behaviors where some specified action happens in another backend in
+ * a couple of cases: 1) between parsing and execution of any desired query
+ * when using the planner_hook, 2) between RevalidateCachedQuery() and
+ * ExecutorStart() when using the ExecutorStart_hook.
  *
  * Copyright (c) 2020-2023, PostgreSQL Global Development Group
  *
@@ -22,6 +26,7 @@
 
 #include <limits.h>
 
+#include "executor/executor.h"
 #include "optimizer/planner.h"
 #include "utils/builtins.h"
 #include "utils/guc.h"
@@ -32,9 +37,11 @@ PG_MODULE_MAGIC;
 
 /* GUC: advisory lock ID to use.  Zero disables the feature. */
 static int	post_planning_lock_id = 0;
+static int	executor_start_lock_id = 0;
 
-/* Save previous planner hook user to be a good citizen */
+/* Save previous hook users to be a good citizen */
 static planner_hook_type prev_planner_hook = NULL;
+static ExecutorStart_hook_type prev_ExecutorStart_hook = NULL;
 
 
 /* planner_hook function to provide the desired delay */
@@ -70,11 +77,41 @@ delay_execution_planner(Query *parse, const char *query_string,
 	return result;
 }
 
+/* ExecutorStart_hook function to provide the desired delay */
+static void
+delay_execution_ExecutorStart(QueryDesc *queryDesc, int eflags)
+{
+	/* If enabled, delay by taking and releasing the specified lock */
+	if (executor_start_lock_id != 0)
+	{
+		DirectFunctionCall1(pg_advisory_lock_int8,
+							Int64GetDatum((int64) executor_start_lock_id));
+		DirectFunctionCall1(pg_advisory_unlock_int8,
+							Int64GetDatum((int64) executor_start_lock_id));
+
+		/*
+		 * Ensure that we notice any pending invalidations, since the advisory
+		 * lock functions don't do this.
+		 */
+		AcceptInvalidationMessages();
+	}
+
+	/* Now start the executor, possibly via a previous hook user */
+	if (prev_ExecutorStart_hook)
+		prev_ExecutorStart_hook(queryDesc, eflags);
+	else
+		standard_ExecutorStart(queryDesc, eflags);
+
+	if (executor_start_lock_id != 0)
+		elog(NOTICE, "Finished ExecutorStart(): CachedPlan is %s",
+			 queryDesc->cplan->is_valid ? "valid" : "not valid");
+}
+
 /* Module load function */
 void
 _PG_init(void)
 {
-	/* Set up the GUC to control which lock is used */
+	/* Set up GUCs to control which lock is used */
 	DefineCustomIntVariable("delay_execution.post_planning_lock_id",
 							"Sets the advisory lock ID to be locked/unlocked after planning.",
 							"Zero disables the delay.",
@@ -86,10 +123,22 @@ _PG_init(void)
 							NULL,
 							NULL,
 							NULL);
-
+	DefineCustomIntVariable("delay_execution.executor_start_lock_id",
+							"Sets the advisory lock ID to be locked/unlocked before starting execution.",
+							"Zero disables the delay.",
+							&executor_start_lock_id,
+							0,
+							0, INT_MAX,
+							PGC_USERSET,
+							0,
+							NULL,
+							NULL,
+							NULL);
 	MarkGUCPrefixReserved("delay_execution");
 
-	/* Install our hook */
+	/* Install our hooks. */
 	prev_planner_hook = planner_hook;
 	planner_hook = delay_execution_planner;
+	prev_ExecutorStart_hook = ExecutorStart_hook;
+	ExecutorStart_hook = delay_execution_ExecutorStart;
 }
diff --git a/src/test/modules/delay_execution/expected/cached-plan-replan.out b/src/test/modules/delay_execution/expected/cached-plan-replan.out
new file mode 100644
index 0000000000..eaac55122b
--- /dev/null
+++ b/src/test/modules/delay_execution/expected/cached-plan-replan.out
@@ -0,0 +1,43 @@
+Parsed test spec with 2 sessions
+
+starting permutation: s1prep s2lock s1exec s2dropi s2unlock
+step s1prep: SET plan_cache_mode = force_generic_plan;
+		  PREPARE q AS SELECT * FROM foov WHERE a = $1;
+		  EXPLAIN (COSTS OFF) EXECUTE q (1);
+QUERY PLAN                             
+---------------------------------------
+Append                                 
+  Subplans Removed: 1                  
+  ->  Bitmap Heap Scan on foo1 foo_1   
+        Recheck Cond: (a = $1)         
+        ->  Bitmap Index Scan on foo1_a
+              Index Cond: (a = $1)     
+(6 rows)
+
+step s2lock: SELECT pg_advisory_lock(12345);
+pg_advisory_lock
+----------------
+                
+(1 row)
+
+step s1exec: LOAD 'delay_execution';
+		  SET delay_execution.executor_start_lock_id = 12345;
+		  EXPLAIN (COSTS OFF) EXECUTE q (1); <waiting ...>
+step s2dropi: DROP INDEX foo1_a;
+step s2unlock: SELECT pg_advisory_unlock(12345);
+pg_advisory_unlock
+------------------
+t                 
+(1 row)
+
+step s1exec: <... completed>
+s1: NOTICE:  Finished ExecutorStart(): CachedPlan is not valid
+s1: NOTICE:  Finished ExecutorStart(): CachedPlan is valid
+QUERY PLAN                  
+----------------------------
+Append                      
+  Subplans Removed: 1       
+  ->  Seq Scan on foo1 foo_1
+        Filter: (a = $1)    
+(4 rows)
+
diff --git a/src/test/modules/delay_execution/specs/cached-plan-replan.spec b/src/test/modules/delay_execution/specs/cached-plan-replan.spec
new file mode 100644
index 0000000000..5bd5fdbf1c
--- /dev/null
+++ b/src/test/modules/delay_execution/specs/cached-plan-replan.spec
@@ -0,0 +1,39 @@
+# Test to check that invalidation of a cached plan during ExecutorStart
+# correctly triggers replanning and re-execution.
+
+setup
+{
+  CREATE TABLE foo (a int, b text) PARTITION BY LIST(a);
+  CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1);
+  CREATE INDEX foo1_a ON foo1 (a);
+  CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2);
+  CREATE VIEW foov AS SELECT * FROM foo;
+}
+
+teardown
+{
+  DROP VIEW foov;
+  DROP TABLE foo;
+}
+
+session "s1"
+# Creates a prepared statement and forces creation of a generic plan
+step "s1prep"   { SET plan_cache_mode = force_generic_plan;
+		  PREPARE q AS SELECT * FROM foov WHERE a = $1;
+		  EXPLAIN (COSTS OFF) EXECUTE q (1); }
+# Executes a generic plan
+step "s1exec"	{ LOAD 'delay_execution';
+		  SET delay_execution.executor_start_lock_id = 12345;
+		  EXPLAIN (COSTS OFF) EXECUTE q (1); }
+
+session "s2"
+step "s2lock"	{ SELECT pg_advisory_lock(12345); }
+step "s2unlock"	{ SELECT pg_advisory_unlock(12345); }
+step "s2dropi"	{ DROP INDEX foo1_a; }
+
+# While "s1exec" waits to acquire the advisory lock, "s2dropi" is able to drop
+# the index being used in the cached plan for `q`, so when "s1exec" is then
+# unblocked and initializes the cached plan for execution, it detects the
+# concurrent index drop and causes the cached plan to be discarded and
+# recreated without the index.
+permutation "s1prep" "s2lock" "s1exec" "s2dropi" "s2unlock"
-- 
2.35.3

