From baee6cd9e575728650813152af0d4d2d9c96674f Mon Sep 17 00:00:00 2001
From: Robert Haas <rhaas@postgresql.org>
Date: Mon, 26 Aug 2024 12:27:08 -0400
Subject: [PATCH v2 1/2] Convert enable_* GUCs into per-RelOptInfo values with
 GUCs setting defaults.

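Previously, the enable_* planner GUCs were consulted directly at each
point where the corresponding path type might be generated or costed.
Instead, give every RelOptInfo a path_type_mask, initialized from a new
default_path_type_mask that assign hooks on the relevant GUCs keep up
to date, and test it via a new REL_CAN_USE_PATH() macro.  To make that
possible, pass the relevant RelOptInfo down to cost_sort(),
cost_material(), cost_agg(), the initial join costing functions, and
materialize_finished_plan(), which previously looked at the global
variables.

The benefit is that path types can now be enabled or disabled for
individual relations rather than only globally; in particular, a plugin
can adjust a rel's path_type_mask from get_relation_info_hook.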
---
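Notes (not part of the commit message):

As an illustration of the new per-rel mask, here is a minimal sketch of
an extension that forbids sequential scans on one particular table via
get_relation_info_hook, which the updated comment in plancat.c calls out
as a supported way to change the path type mask.  This is an assumption
about intended usage, not code from the patch itself; my_big_table_oid
and my_get_relation_info are hypothetical placeholders.

    #include "postgres.h"

    #include "fmgr.h"
    #include "nodes/pathnodes.h"
    #include "optimizer/plancat.h"

    PG_MODULE_MAGIC;

    static get_relation_info_hook_type prev_get_relation_info_hook = NULL;

    /* Hypothetical OID; a real extension would resolve this itself. */
    static Oid my_big_table_oid = InvalidOid;

    static void
    my_get_relation_info(PlannerInfo *root, Oid relationObjectId,
                         bool inhparent, RelOptInfo *rel)
    {
        if (prev_get_relation_info_hook)
            prev_get_relation_info_hook(root, relationObjectId,
                                        inhparent, rel);

        /* Clear only the SeqScan bit; all other path types keep their
         * GUC-derived defaults for this rel. */
        if (relationObjectId == my_big_table_oid)
            rel->path_type_mask &= ~PathTypeSeqScan;
    }

    void
    _PG_init(void)
    {
        prev_get_relation_info_hook = get_relation_info_hook;
        get_relation_info_hook = my_get_relation_info;
    }
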
 contrib/postgres_fdw/postgres_fdw.c     |   5 +-
 src/backend/optimizer/path/allpaths.c   |  15 ++--
 src/backend/optimizer/path/costsize.c   | 107 +++++++++++++++++-------
 src/backend/optimizer/path/indxpath.c   |   2 +-
 src/backend/optimizer/path/joinpath.c   |  53 ++++++------
 src/backend/optimizer/path/pathkeys.c   |   3 +-
 src/backend/optimizer/path/tidpath.c    |  11 +--
 src/backend/optimizer/plan/createplan.c |  26 +++---
 src/backend/optimizer/plan/planner.c    |  58 ++++++++-----
 src/backend/optimizer/plan/subselect.c  |   9 +-
 src/backend/optimizer/prep/prepunion.c  |  20 +++--
 src/backend/optimizer/util/pathnode.c   |  21 ++---
 src/backend/optimizer/util/plancat.c    |   3 +-
 src/backend/optimizer/util/relnode.c    |   6 +-
 src/backend/utils/misc/guc_tables.c     |  37 ++++----
 src/include/nodes/pathnodes.h           |  29 +++++++
 src/include/optimizer/cost.h            |  29 ++++++-
 src/include/optimizer/planmain.h        |   2 +-
 18 files changed, 288 insertions(+), 148 deletions(-)

diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index adc62576d1..1df4ddf268 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -439,6 +439,7 @@ static void get_remote_estimate(const char *sql,
 								Cost *startup_cost,
 								Cost *total_cost);
 static void adjust_foreign_grouping_path_cost(PlannerInfo *root,
+											  RelOptInfo *rel,
 											  List *pathkeys,
 											  double retrieved_rows,
 											  double width,
@@ -3489,7 +3490,7 @@ estimate_path_cost_size(PlannerInfo *root,
 			{
 				Assert(foreignrel->reloptkind == RELOPT_UPPER_REL &&
 					   fpinfo->stage == UPPERREL_GROUP_AGG);
-				adjust_foreign_grouping_path_cost(root, pathkeys,
+				adjust_foreign_grouping_path_cost(root, foreignrel, pathkeys,
 												  retrieved_rows, width,
 												  fpextra->limit_tuples,
 												  &disabled_nodes,
@@ -3644,6 +3645,7 @@ get_remote_estimate(const char *sql, PGconn *conn,
  */
 static void
 adjust_foreign_grouping_path_cost(PlannerInfo *root,
+								  RelOptInfo *rel,
 								  List *pathkeys,
 								  double retrieved_rows,
 								  double width,
@@ -3667,6 +3669,7 @@ adjust_foreign_grouping_path_cost(PlannerInfo *root,
 
 		cost_sort(&sort_path,
 				  root,
+				  rel,
 				  pathkeys,
 				  0,
 				  *p_startup_cost + *p_run_cost,
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 057b4b79eb..8645244f84 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -970,7 +970,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
 	 * flag; currently, we only consider partitionwise joins with the baserel
 	 * if its targetlist doesn't contain a whole-row Var.
 	 */
-	if (enable_partitionwise_join &&
+	if (REL_CAN_USE_PATH(rel, PartitionwiseJoin) &&
 		rel->reloptkind == RELOPT_BASEREL &&
 		rte->relkind == RELKIND_PARTITIONED_TABLE &&
 		bms_is_empty(rel->attr_needed[InvalidAttrNumber - rel->min_attr]))
@@ -1325,7 +1325,8 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 	double		partial_rows = -1;
 
 	/* If appropriate, consider parallel append */
-	pa_subpaths_valid = enable_parallel_append && rel->consider_parallel;
+	pa_subpaths_valid = REL_CAN_USE_PATH(rel, ParallelAppend) &&
+		rel->consider_parallel;
 
 	/*
 	 * For every non-dummy child, remember the cheapest path.  Also, identify
@@ -1535,7 +1536,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 		 * partitions vs. an unpartitioned table with the same data, so the
 		 * use of some kind of log-scaling here seems to make some sense.
 		 */
-		if (enable_parallel_append)
+		if (REL_CAN_USE_PATH(rel, ParallelAppend))
 		{
 			parallel_workers = Max(parallel_workers,
 								   pg_leftmost_one_pos32(list_length(live_childrels)) + 1);
@@ -1547,7 +1548,7 @@ add_paths_to_append_rel(PlannerInfo *root, RelOptInfo *rel,
 		/* Generate a partial append path. */
 		appendpath = create_append_path(root, rel, NIL, partial_subpaths,
 										NIL, NULL, parallel_workers,
-										enable_parallel_append,
+										REL_CAN_USE_PATH(rel, ParallelAppend),
 										-1);
 
 		/*
@@ -3259,7 +3260,8 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
 			 * input path).
 			 */
 			if (subpath != cheapest_partial_path &&
-				(presorted_keys == 0 || !enable_incremental_sort))
+				(presorted_keys == 0 ||
+				 !REL_CAN_USE_PATH(rel, IncrementalSort)))
 				continue;
 
 			/*
@@ -3274,7 +3276,8 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
 			 * output. Here we add an explicit sort to match the useful
 			 * ordering.
 			 */
-			if (presorted_keys == 0 || !enable_incremental_sort)
+			if (presorted_keys == 0 ||
+				!REL_CAN_USE_PATH(rel, IncrementalSort))
 			{
 				subpath = (Path *) create_sort_path(root,
 													rel,
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index e1523d15df..3dfa22fdcd 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -163,6 +163,7 @@ bool		enable_parallel_hash = true;
 bool		enable_partition_pruning = true;
 bool		enable_presorted_aggregate = true;
 bool		enable_async_append = true;
+uint32		default_path_type_mask = PathTypeMaskAll;
 
 typedef struct
 {
@@ -283,6 +284,38 @@ clamp_cardinality_to_long(Cardinality x)
 	return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
 }
 
+/*
+ * Define assign hooks for each enable_<whatever> GUC that affects
+ * default_path_type_mask. These are all basically identical, so we use
+ * a templating macro to define them.
+ */
+#define define_assign_hook(gucname, type) \
+	void \
+	gucname ## _assign_hook(bool newval, void *extra) \
+	{ \
+		if (newval) \
+			default_path_type_mask |= PathType ## type; \
+		else \
+			default_path_type_mask &= ~(PathType ## type); \
+	}
+define_assign_hook(enable_bitmapscan, BitmapScan)
+define_assign_hook(enable_gathermerge, GatherMerge)
+define_assign_hook(enable_hashagg, HashAgg)
+define_assign_hook(enable_hashjoin, HashJoin)
+define_assign_hook(enable_incremental_sort, IncrementalSort)
+define_assign_hook(enable_indexscan, IndexScan)
+define_assign_hook(enable_indexonlyscan, IndexOnlyScan)
+define_assign_hook(enable_material, Material)
+define_assign_hook(enable_memoize, Memoize)
+define_assign_hook(enable_mergejoin, MergeJoin)
+define_assign_hook(enable_nestloop, NestLoop)
+define_assign_hook(enable_parallel_append, ParallelAppend)
+define_assign_hook(enable_parallel_hash, ParallelHash)
+define_assign_hook(enable_partitionwise_join, PartitionwiseJoin)
+define_assign_hook(enable_partitionwise_aggregate, PartitionwiseAggregate)
+define_assign_hook(enable_seqscan, SeqScan)
+define_assign_hook(enable_sort, Sort)
+define_assign_hook(enable_tidscan, TIDScan)
 
 /*
  * cost_seqscan
@@ -354,7 +387,7 @@ cost_seqscan(Path *path, PlannerInfo *root,
 		path->rows = clamp_row_est(path->rows / parallel_divisor);
 	}
 
-	path->disabled_nodes = enable_seqscan ? 0 : 1;
+	path->disabled_nodes = REL_CAN_USE_PATH(baserel, SeqScan) ? 0 : 1;
 	path->startup_cost = startup_cost;
 	path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
 }
@@ -533,7 +566,7 @@ cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
 	run_cost += parallel_tuple_cost * path->path.rows * 1.05;
 
 	path->path.disabled_nodes = input_disabled_nodes
-		+ (enable_gathermerge ? 0 : 1);
+		+ (REL_CAN_USE_PATH(rel, GatherMerge) ? 0 : 1);
 	path->path.startup_cost = startup_cost + input_startup_cost;
 	path->path.total_cost = (startup_cost + run_cost + input_total_cost);
 }
@@ -615,7 +648,8 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
 	}
 
 	/* we don't need to check enable_indexonlyscan; indxpath.c does that */
-	path->path.disabled_nodes = enable_indexscan ? 0 : 1;
+	path->path.disabled_nodes =
+		REL_CAN_USE_PATH(baserel, IndexScan) ? 0 : 1;
 
 	/*
 	 * Call index-access-method-specific code to estimate the processing cost
@@ -1109,7 +1143,7 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
 	startup_cost += path->pathtarget->cost.startup;
 	run_cost += path->pathtarget->cost.per_tuple * path->rows;
 
-	path->disabled_nodes = enable_bitmapscan ? 0 : 1;
+	path->disabled_nodes = REL_CAN_USE_PATH(baserel, BitmapScan) ? 0 : 1;
 	path->startup_cost = startup_cost;
 	path->total_cost = startup_cost + run_cost;
 }
@@ -1287,10 +1321,11 @@ cost_tidscan(Path *path, PlannerInfo *root,
 
 		/*
 		 * We must use a TID scan for CurrentOfExpr; in any other case, we
-		 * should be generating a TID scan only if enable_tidscan=true. Also,
-		 * if CurrentOfExpr is the qual, there should be only one.
+		 * should be generating a TID scan only if TID scans are enabled
+		 * for this rel. Also, if CurrentOfExpr is the qual, there should be
+		 * only one.
 		 */
-		Assert(enable_tidscan || IsA(qual, CurrentOfExpr));
+		Assert(REL_CAN_USE_PATH(baserel, TIDScan) || IsA(qual, CurrentOfExpr));
 		Assert(list_length(tidquals) == 1 || !IsA(qual, CurrentOfExpr));
 
 		if (IsA(qual, ScalarArrayOpExpr))
@@ -1342,8 +1377,8 @@ cost_tidscan(Path *path, PlannerInfo *root,
 
 	/*
 	 * There are assertions above verifying that we only reach this function
-	 * either when enable_tidscan=true or when the TID scan is the only legal
-	 * path, so it's safe to set disabled_nodes to zero here.
+	 * either when TID scans are enabled for this rel or when a TID scan is
+	 * the only legal path, so it's safe to set disabled_nodes to zero here.
 	 */
 	path->disabled_nodes = 0;
 	path->startup_cost = startup_cost;
@@ -1438,8 +1473,8 @@ cost_tidrangescan(Path *path, PlannerInfo *root,
 	startup_cost += path->pathtarget->cost.startup;
 	run_cost += path->pathtarget->cost.per_tuple * path->rows;
 
-	/* we should not generate this path type when enable_tidscan=false */
-	Assert(enable_tidscan);
+	/* we should not generate this path type when TID scans are disabled */
+	Assert(REL_CAN_USE_PATH(baserel, TIDScan));
 	path->disabled_nodes = 0;
 	path->startup_cost = startup_cost;
 	path->total_cost = startup_cost + run_cost;
@@ -2120,8 +2155,11 @@ cost_incremental_sort(Path *path,
 
 	path->rows = input_tuples;
 
-	/* should not generate these paths when enable_incremental_sort=false */
-	Assert(enable_incremental_sort);
+	/*
+	 * If incremental sort is not enabled here, we should not have generated a
+	 * path of this type.
+	 */
+	Assert(REL_CAN_USE_PATH(path->parent, IncrementalSort));
 	path->disabled_nodes = input_disabled_nodes;
 
 	path->startup_cost = startup_cost;
@@ -2141,7 +2179,7 @@ cost_incremental_sort(Path *path,
  * of sort keys, which all callers *could* supply.)
  */
 void
-cost_sort(Path *path, PlannerInfo *root,
+cost_sort(Path *path, PlannerInfo *root, RelOptInfo *rel,
 		  List *pathkeys, int input_disabled_nodes,
 		  Cost input_cost, double tuples, int width,
 		  Cost comparison_cost, int sort_mem,
@@ -2159,7 +2197,8 @@ cost_sort(Path *path, PlannerInfo *root,
 	startup_cost += input_cost;
 
 	path->rows = tuples;
-	path->disabled_nodes = input_disabled_nodes + (enable_sort ? 0 : 1);
+	path->disabled_nodes = input_disabled_nodes +
+		(REL_CAN_USE_PATH(rel, Sort) ? 0 : 1);
 	path->startup_cost = startup_cost;
 	path->total_cost = startup_cost + run_cost;
 }
@@ -2321,6 +2360,7 @@ cost_append(AppendPath *apath)
 					 */
 					cost_sort(&sort_path,
 							  NULL, /* doesn't currently need root */
+							  apath->path.parent,
 							  pathkeys,
 							  subpath->disabled_nodes,
 							  subpath->total_cost,
@@ -2480,7 +2520,7 @@ cost_merge_append(Path *path, PlannerInfo *root,
  * occur only on rescan, which is estimated in cost_rescan.
  */
 void
-cost_material(Path *path,
+cost_material(Path *path, RelOptInfo *rel,
 			  int input_disabled_nodes,
 			  Cost input_startup_cost, Cost input_total_cost,
 			  double tuples, int width)
@@ -2519,7 +2559,8 @@ cost_material(Path *path,
 		run_cost += seq_page_cost * npages;
 	}
 
-	path->disabled_nodes = input_disabled_nodes + (enable_material ? 0 : 1);
+	path->disabled_nodes = input_disabled_nodes +
+		(REL_CAN_USE_PATH(rel, Material) ? 0 : 1);
 	path->startup_cost = startup_cost;
 	path->total_cost = startup_cost + run_cost;
 }
@@ -2679,7 +2720,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
  * are for appropriately-sorted input.
  */
 void
-cost_agg(Path *path, PlannerInfo *root,
+cost_agg(Path *path, PlannerInfo *root, RelOptInfo *rel,
 		 AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
 		 int numGroupCols, double numGroups,
 		 List *quals,
@@ -2738,7 +2779,7 @@ cost_agg(Path *path, PlannerInfo *root,
 		/* Here we are able to deliver output on-the-fly */
 		startup_cost = input_startup_cost;
 		total_cost = input_total_cost;
-		if (aggstrategy == AGG_MIXED && !enable_hashagg)
+		if (aggstrategy == AGG_MIXED && !REL_CAN_USE_PATH(rel, HashAgg))
 			++disabled_nodes;
 		/* calcs phrased this way to match HASHED case, see note above */
 		total_cost += aggcosts->transCost.startup;
@@ -2753,7 +2794,7 @@ cost_agg(Path *path, PlannerInfo *root,
 	{
 		/* must be AGG_HASHED */
 		startup_cost = input_total_cost;
-		if (!enable_hashagg)
+		if (!REL_CAN_USE_PATH(rel, HashAgg))
 			++disabled_nodes;
 		startup_cost += aggcosts->transCost.startup;
 		startup_cost += aggcosts->transCost.per_tuple * input_tuples;
@@ -3266,7 +3307,7 @@ cost_group(Path *path, PlannerInfo *root,
  */
 void
 initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
-					  JoinType jointype,
+					  RelOptInfo *joinrel, JoinType jointype,
 					  Path *outer_path, Path *inner_path,
 					  JoinPathExtraData *extra)
 {
@@ -3280,7 +3321,7 @@ initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
 	Cost		inner_rescan_run_cost;
 
 	/* Count up disabled nodes. */
-	disabled_nodes = enable_nestloop ? 0 : 1;
+	disabled_nodes = REL_CAN_USE_PATH(joinrel, NestLoop) ? 0 : 1;
 	disabled_nodes += inner_path->disabled_nodes;
 	disabled_nodes += outer_path->disabled_nodes;
 
@@ -3549,7 +3590,7 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
  */
 void
 initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
-					   JoinType jointype,
+					   RelOptInfo *joinrel, JoinType jointype,
 					   List *mergeclauses,
 					   Path *outer_path, Path *inner_path,
 					   List *outersortkeys, List *innersortkeys,
@@ -3676,7 +3717,7 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 	Assert(outerstartsel <= outerendsel);
 	Assert(innerstartsel <= innerendsel);
 
-	disabled_nodes = enable_mergejoin ? 0 : 1;
+	disabled_nodes = REL_CAN_USE_PATH(joinrel, MergeJoin) ? 0 : 1;
 
 	/* cost of source data */
 
@@ -3684,6 +3725,7 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 	{
 		cost_sort(&sort_path,
 				  root,
+				  outer_path->parent,
 				  outersortkeys,
 				  outer_path->disabled_nodes,
 				  outer_path->total_cost,
@@ -3713,6 +3755,7 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 	{
 		cost_sort(&sort_path,
 				  root,
+				  inner_path->parent,
 				  innersortkeys,
 				  inner_path->disabled_nodes,
 				  inner_path->total_cost,
@@ -3793,6 +3836,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
 					 JoinCostWorkspace *workspace,
 					 JoinPathExtraData *extra)
 {
+	RelOptInfo *joinrel = path->jpath.path.parent;
 	Path	   *outer_path = path->jpath.outerjoinpath;
 	Path	   *inner_path = path->jpath.innerjoinpath;
 	double		inner_path_rows = inner_path->rows;
@@ -3946,7 +3990,8 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
 	 * Prefer materializing if it looks cheaper, unless the user has asked to
 	 * suppress materialization.
 	 */
-	else if (enable_material && mat_inner_cost < bare_inner_cost)
+	else if (REL_CAN_USE_PATH(joinrel, Material) &&
+			 mat_inner_cost < bare_inner_cost)
 		path->materialize_inner = true;
 
 	/*
@@ -3961,7 +4006,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
 	 * selected as the input of a mergejoin, and they don't support
 	 * mark/restore at present.
 	 *
-	 * We don't test the value of enable_material here, because
+	 * We don't test whether a MaterialPath is allowed here, because
 	 * materialization is required for correctness in this case, and turning
 	 * it off does not entitle us to deliver an invalid plan.
 	 */
@@ -3977,10 +4022,10 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path,
 	 * though.
 	 *
 	 * Since materialization is a performance optimization in this case,
-	 * rather than necessary for correctness, we skip it if enable_material is
-	 * off.
+	 * rather than necessary for correctness, we skip it if a MaterialPath
+	 * is not allowed here.
 	 */
-	else if (enable_material && innersortkeys != NIL &&
+	else if (REL_CAN_USE_PATH(joinrel, Material) && innersortkeys != NIL &&
 			 relation_byte_size(inner_path_rows,
 								inner_path->pathtarget->width) >
 			 (work_mem * 1024L))
@@ -4113,7 +4158,7 @@ cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
  */
 void
 initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
-					  JoinType jointype,
+					  RelOptInfo *joinrel, JoinType jointype,
 					  List *hashclauses,
 					  Path *outer_path, Path *inner_path,
 					  JoinPathExtraData *extra,
@@ -4132,7 +4177,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
 	size_t		space_allowed;	/* unused */
 
 	/* Count up disabled nodes. */
-	disabled_nodes = enable_hashjoin ? 0 : 1;
+	disabled_nodes = REL_CAN_USE_PATH(joinrel, HashJoin) ? 0 : 1;
 	disabled_nodes += inner_path->disabled_nodes;
 	disabled_nodes += outer_path->disabled_nodes;
 
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index c0fcc7d78d..be183db4ce 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -1736,7 +1736,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
 	int			i;
 
 	/* Index-only scans must be enabled */
-	if (!enable_indexonlyscan)
+	if (!REL_CAN_USE_PATH(rel, IndexOnlyScan))
 		return false;
 
 	/*
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index b0e8c94dfc..5f0040cd1b 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -205,10 +205,10 @@ add_paths_to_joinrel(PlannerInfo *root,
 	/*
 	 * Find potential mergejoin clauses.  We can skip this if we are not
 	 * interested in doing a mergejoin.  However, mergejoin may be our only
-	 * way of implementing a full outer join, so override enable_mergejoin if
-	 * it's a full join.
+	 * way of implementing a full outer join, so disregard the result of
+	 * REL_CAN_USE_PATH() if it's a full join.
 	 */
-	if (enable_mergejoin || jointype == JOIN_FULL)
+	if (REL_CAN_USE_PATH(joinrel, MergeJoin) || jointype == JOIN_FULL)
 		extra.mergeclause_list = select_mergejoin_clauses(root,
 														  joinrel,
 														  outerrel,
@@ -316,10 +316,11 @@ add_paths_to_joinrel(PlannerInfo *root,
 
 	/*
 	 * 4. Consider paths where both outer and inner relations must be hashed
-	 * before being joined.  As above, disregard enable_hashjoin for full
-	 * joins, because there may be no other alternative.
+	 * before being joined.  As above, the result of REL_CAN_USE_PATH()
+	 * doesn't matter for full joins, because there may be no other
+	 * alternative.
 	 */
-	if (enable_hashjoin || jointype == JOIN_FULL)
+	if (REL_CAN_USE_PATH(joinrel, HashJoin) || jointype == JOIN_FULL)
 		hash_inner_and_outer(root, joinrel, outerrel, innerrel,
 							 jointype, &extra);
 
@@ -672,7 +673,7 @@ extract_lateral_vars_from_PHVs(PlannerInfo *root, Relids innerrelids)
  * we do not have a way to extract cache keys from joinrels.
  */
 static Path *
-get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
+get_memoize_path(PlannerInfo *root, RelOptInfo *joinrel, RelOptInfo *innerrel,
 				 RelOptInfo *outerrel, Path *inner_path,
 				 Path *outer_path, JoinType jointype,
 				 JoinPathExtraData *extra)
@@ -684,7 +685,7 @@ get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
 	List	   *ph_lateral_vars;
 
 	/* Obviously not if it's disabled */
-	if (!enable_memoize)
+	if (!REL_CAN_USE_PATH(joinrel, Memoize))
 		return NULL;
 
 	/*
@@ -912,7 +913,7 @@ try_nestloop_path(PlannerInfo *root,
 	 * The latter two steps are expensive enough to make this two-phase
 	 * methodology worthwhile.
 	 */
-	initial_cost_nestloop(root, &workspace, jointype,
+	initial_cost_nestloop(root, &workspace, joinrel, jointype,
 						  outer_path, inner_path, extra);
 
 	if (add_path_precheck(joinrel, workspace.disabled_nodes,
@@ -997,7 +998,7 @@ try_partial_nestloop_path(PlannerInfo *root,
 	 * Before creating a path, get a quick lower bound on what it is likely to
 	 * cost.  Bail out right away if it looks terrible.
 	 */
-	initial_cost_nestloop(root, &workspace, jointype,
+	initial_cost_nestloop(root, &workspace, joinrel, jointype,
 						  outer_path, inner_path, extra);
 	if (!add_partial_path_precheck(joinrel, workspace.disabled_nodes,
 								   workspace.total_cost, pathkeys))
@@ -1092,7 +1093,7 @@ try_mergejoin_path(PlannerInfo *root,
 	/*
 	 * See comments in try_nestloop_path().
 	 */
-	initial_cost_mergejoin(root, &workspace, jointype, mergeclauses,
+	initial_cost_mergejoin(root, &workspace, joinrel, jointype, mergeclauses,
 						   outer_path, inner_path,
 						   outersortkeys, innersortkeys,
 						   extra);
@@ -1164,7 +1165,7 @@ try_partial_mergejoin_path(PlannerInfo *root,
 	/*
 	 * See comments in try_partial_nestloop_path().
 	 */
-	initial_cost_mergejoin(root, &workspace, jointype, mergeclauses,
+	initial_cost_mergejoin(root, &workspace, joinrel, jointype, mergeclauses,
 						   outer_path, inner_path,
 						   outersortkeys, innersortkeys,
 						   extra);
@@ -1236,7 +1237,7 @@ try_hashjoin_path(PlannerInfo *root,
 	 * See comments in try_nestloop_path().  Also note that hashjoin paths
 	 * never have any output pathkeys, per comments in create_hashjoin_path.
 	 */
-	initial_cost_hashjoin(root, &workspace, jointype, hashclauses,
+	initial_cost_hashjoin(root, &workspace, joinrel, jointype, hashclauses,
 						  outer_path, inner_path, extra, false);
 
 	if (add_path_precheck(joinrel, workspace.disabled_nodes,
@@ -1298,7 +1299,7 @@ try_partial_hashjoin_path(PlannerInfo *root,
 	 * Before creating a path, get a quick lower bound on what it is likely to
 	 * cost.  Bail out right away if it looks terrible.
 	 */
-	initial_cost_hashjoin(root, &workspace, jointype, hashclauses,
+	initial_cost_hashjoin(root, &workspace, joinrel, jointype, hashclauses,
 						  outer_path, inner_path, extra, parallel_hash);
 	if (!add_partial_path_precheck(joinrel, workspace.disabled_nodes,
 								   workspace.total_cost, NIL))
@@ -1899,10 +1900,11 @@ match_unsorted_outer(PlannerInfo *root,
 	{
 		/*
 		 * Consider materializing the cheapest inner path, unless
-		 * enable_material is off or the path in question materializes its
-		 * output anyway.
+		 * materialization is disabled or the path in question materializes
+		 * its output anyway.
 		 */
-		if (enable_material && inner_cheapest_total != NULL &&
+		if (REL_CAN_USE_PATH(innerrel, Material) &&
+			inner_cheapest_total != NULL &&
 			!ExecMaterializesOutput(inner_cheapest_total->pathtype))
 			matpath = (Path *)
 				create_material_path(innerrel, inner_cheapest_total);
@@ -1982,7 +1984,7 @@ match_unsorted_outer(PlannerInfo *root,
 				 * Try generating a memoize path and see if that makes the
 				 * nested loop any cheaper.
 				 */
-				mpath = get_memoize_path(root, innerrel, outerrel,
+				mpath = get_memoize_path(root, joinrel, innerrel, outerrel,
 										 innerpath, outerpath, jointype,
 										 extra);
 				if (mpath != NULL)
@@ -2134,13 +2136,14 @@ consider_parallel_nestloop(PlannerInfo *root,
 	/*
 	 * Consider materializing the cheapest inner path, unless: 1) we're doing
 	 * JOIN_UNIQUE_INNER, because in this case we have to unique-ify the
-	 * cheapest inner path, 2) enable_material is off, 3) the cheapest inner
-	 * path is not parallel-safe, 4) the cheapest inner path is parameterized
-	 * by the outer rel, or 5) the cheapest inner path materializes its output
-	 * anyway.
+	 * cheapest inner path, 2) MaterialPath is not allowed for this rel, 3)
+	 * the cheapest inner path is not parallel-safe, 4) the cheapest inner
+	 * path is parameterized by the outer rel, or 5) the cheapest inner path
+	 * materializes its output anyway.
 	 */
 	if (save_jointype != JOIN_UNIQUE_INNER &&
-		enable_material && inner_cheapest_total->parallel_safe &&
+		REL_CAN_USE_PATH(joinrel, Material) &&
+		inner_cheapest_total->parallel_safe &&
 		!PATH_PARAM_BY_REL(inner_cheapest_total, outerrel) &&
 		!ExecMaterializesOutput(inner_cheapest_total->pathtype))
 	{
@@ -2198,7 +2201,7 @@ consider_parallel_nestloop(PlannerInfo *root,
 			 * Try generating a memoize path and see if that makes the nested
 			 * loop any cheaper.
 			 */
-			mpath = get_memoize_path(root, innerrel, outerrel,
+			mpath = get_memoize_path(root, joinrel, innerrel, outerrel,
 									 innerpath, outerpath, jointype,
 									 extra);
 			if (mpath != NULL)
@@ -2416,7 +2419,7 @@ hash_inner_and_outer(PlannerInfo *root,
 			 */
 			if (innerrel->partial_pathlist != NIL &&
 				save_jointype != JOIN_UNIQUE_INNER &&
-				enable_parallel_hash)
+				REL_CAN_USE_PATH(joinrel, ParallelHash))
 			{
 				cheapest_partial_inner =
 					(Path *) linitial(innerrel->partial_pathlist);
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index e25798972f..26a0f5c0c3 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -505,7 +505,8 @@ get_useful_group_keys_orderings(PlannerInfo *root, Path *path)
 										   root->num_groupby_pathkeys);
 
 		if (n > 0 &&
-			(enable_incremental_sort || n == root->num_groupby_pathkeys) &&
+			(REL_CAN_USE_PATH(path->parent, IncrementalSort) ||
+			 n == root->num_groupby_pathkeys) &&
 			compare_pathkeys(pathkeys, root->group_pathkeys) != PATHKEYS_EQUAL)
 		{
 			info = makeNode(GroupByOrdering);
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index b0323b26ec..ed447916ec 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -505,13 +505,14 @@ create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
 	 * If any suitable quals exist in the rel's baserestrict list, generate a
 	 * plain (unparameterized) TidPath with them.
 	 *
-	 * We skip this when enable_tidscan = false, except when the qual is
-	 * CurrentOfExpr. In that case, a TID scan is the only correct path.
+	 * We skip this when TID scans are disabled for this rel, except when
+	 * the qual is CurrentOfExpr. In that case, a TID scan is the only
+	 * correct path.
 	 */
 	tidquals = TidQualFromRestrictInfoList(root, rel->baserestrictinfo, rel,
 										   &isCurrentOf);
 
-	if (tidquals != NIL && (enable_tidscan || isCurrentOf))
+	if (tidquals != NIL && (REL_CAN_USE_PATH(rel, TIDScan) || isCurrentOf))
 	{
 		/*
 		 * This path uses no join clauses, but it could still have required
@@ -532,8 +533,8 @@ create_tidscan_paths(PlannerInfo *root, RelOptInfo *rel)
 			return true;
 	}
 
-	/* Skip the rest if TID scans are disabled. */
-	if (!enable_tidscan)
+	/* Skip the rest if TID scans are disabled for this rel. */
+	if (!REL_CAN_USE_PATH(rel, TIDScan))
 		return false;
 
 	/*
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 8e0e5977a9..3f3770bfa7 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -177,8 +177,8 @@ static List *get_switched_clauses(List *clauses, Relids outerrelids);
 static List *order_qual_clauses(PlannerInfo *root, List *clauses);
 static void copy_generic_path_info(Plan *dest, Path *src);
 static void copy_plan_costsize(Plan *dest, Plan *src);
-static void label_sort_with_costsize(PlannerInfo *root, Sort *plan,
-									 double limit_tuples);
+static void label_sort_with_costsize(PlannerInfo *root, RelOptInfo *rel,
+									 Sort *plan, double limit_tuples);
 static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid);
 static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid,
 								   TableSampleClause *tsc);
@@ -1361,7 +1361,8 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
 											 sortColIdx, sortOperators,
 											 collations, nullsFirst);
 
-				label_sort_with_costsize(root, sort, best_path->limit_tuples);
+				label_sort_with_costsize(root, rel, sort,
+										 best_path->limit_tuples);
 				subplan = (Plan *) sort;
 			}
 		}
@@ -1533,7 +1534,7 @@ create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
 										 sortColIdx, sortOperators,
 										 collations, nullsFirst);
 
-			label_sort_with_costsize(root, sort, best_path->limit_tuples);
+			label_sort_with_costsize(root, rel, sort, best_path->limit_tuples);
 			subplan = (Plan *) sort;
 		}
 
@@ -1900,7 +1901,7 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path, int flags)
 			groupColPos++;
 		}
 		sort = make_sort_from_sortclauses(sortList, subplan);
-		label_sort_with_costsize(root, sort, -1.0);
+		label_sort_with_costsize(root, best_path->path.parent, sort, -1.0);
 		plan = (Plan *) make_unique_from_sortclauses((Plan *) sort, sortList);
 	}
 
@@ -4527,7 +4528,8 @@ create_mergejoin_plan(PlannerInfo *root,
 												   best_path->outersortkeys,
 												   outer_relids);
 
-		label_sort_with_costsize(root, sort, -1.0);
+		label_sort_with_costsize(root, best_path->jpath.path.parent,
+								 sort, -1.0);
 		outer_plan = (Plan *) sort;
 		outerpathkeys = best_path->outersortkeys;
 	}
@@ -4541,7 +4543,8 @@ create_mergejoin_plan(PlannerInfo *root,
 												   best_path->innersortkeys,
 												   inner_relids);
 
-		label_sort_with_costsize(root, sort, -1.0);
+		label_sort_with_costsize(root, best_path->jpath.path.parent,
+								 sort, -1.0);
 		inner_plan = (Plan *) sort;
 		innerpathkeys = best_path->innersortkeys;
 	}
@@ -5442,7 +5445,8 @@ copy_plan_costsize(Plan *dest, Plan *src)
  * limit_tuples is as for cost_sort (in particular, pass -1 if no limit)
  */
 static void
-label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
+label_sort_with_costsize(PlannerInfo *root, RelOptInfo *rel, Sort *plan,
+						 double limit_tuples)
 {
 	Plan	   *lefttree = plan->plan.lefttree;
 	Path		sort_path;		/* dummy for result of cost_sort */
@@ -5453,7 +5457,7 @@ label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples)
 	 */
 	Assert(IsA(plan, Sort));
 
-	cost_sort(&sort_path, root, NIL,
+	cost_sort(&sort_path, root, rel, NIL,
 			  plan->plan.disabled_nodes,
 			  lefttree->total_cost,
 			  lefttree->plan_rows,
@@ -6524,7 +6528,7 @@ make_material(Plan *lefttree)
  * Path representation, but it's not worth the trouble yet.
  */
 Plan *
-materialize_finished_plan(Plan *subplan)
+materialize_finished_plan(Plan *subplan, RelOptInfo *rel)
 {
 	Plan	   *matplan;
 	Path		matpath;		/* dummy for result of cost_material */
@@ -6549,7 +6553,7 @@ materialize_finished_plan(Plan *subplan)
 	subplan->total_cost -= initplan_cost;
 
 	/* Set cost data */
-	cost_material(&matpath,
+	cost_material(&matpath, rel,
 				  subplan->disabled_nodes,
 				  subplan->startup_cost,
 				  subplan->total_cost,
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index b5827d3980..c0f431cf0d 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -427,7 +427,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
 	if (cursorOptions & CURSOR_OPT_SCROLL)
 	{
 		if (!ExecSupportsBackwardScan(top_plan))
-			top_plan = materialize_finished_plan(top_plan);
+			top_plan = materialize_finished_plan(top_plan, final_rel);
 	}
 
 	/*
@@ -3831,7 +3831,8 @@ create_grouping_paths(PlannerInfo *root,
 		 * support grouping sets.  create_ordinary_grouping_paths() will check
 		 * additional conditions, such as whether input_rel is partitioned.
 		 */
-		if (enable_partitionwise_aggregate && !parse->groupingSets)
+		if (REL_CAN_USE_PATH(grouped_rel, PartitionwiseAggregate) &&
+			!parse->groupingSets)
 			extra.patype = PARTITIONWISE_AGGREGATE_FULL;
 		else
 			extra.patype = PARTITIONWISE_AGGREGATE_NONE;
@@ -4634,7 +4635,8 @@ create_one_window_path(PlannerInfo *root,
 			 * No presorted keys or incremental sort disabled, just perform a
 			 * complete sort.
 			 */
-			if (presorted_keys == 0 || !enable_incremental_sort)
+			if (presorted_keys == 0 ||
+				!REL_CAN_USE_PATH(window_rel, IncrementalSort))
 				path = (Path *) create_sort_path(root, window_rel,
 												 path,
 												 window_pathkeys,
@@ -4893,7 +4895,8 @@ create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
 				 * cheapest partial path).
 				 */
 				if (input_path != cheapest_partial_path &&
-					(presorted_keys == 0 || !enable_incremental_sort))
+					(presorted_keys == 0 ||
+					 !REL_CAN_USE_PATH(partial_distinct_rel, IncrementalSort)))
 					continue;
 
 				/*
@@ -4901,7 +4904,8 @@ create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
 				 * We'll just do a sort if there are no presorted keys and an
 				 * incremental sort when there are presorted keys.
 				 */
-				if (presorted_keys == 0 || !enable_incremental_sort)
+				if (presorted_keys == 0 ||
+					!REL_CAN_USE_PATH(partial_distinct_rel, IncrementalSort))
 					sorted_path = (Path *) create_sort_path(root,
 															partial_distinct_rel,
 															input_path,
@@ -4961,10 +4965,12 @@ create_partial_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
 	/*
 	 * Now try hash aggregate paths, if enabled and hashing is possible. Since
 	 * we're not on the hook to ensure we do our best to create at least one
-	 * path here, we treat enable_hashagg as a hard off-switch rather than the
-	 * slightly softer variant in create_final_distinct_paths.
+	 * path here, we completely skip this if hash aggregation is not
+	 * enabled. (In contrast, create_final_distinct_paths sometimes considers
+	 * hash aggregation even when it's disabled, to avoid failing completely.)
 	 */
-	if (enable_hashagg && grouping_is_hashable(root->processed_distinctClause))
+	if (REL_CAN_USE_PATH(partial_distinct_rel, HashAgg) &&
+		grouping_is_hashable(root->processed_distinctClause))
 	{
 		add_partial_path(partial_distinct_rel, (Path *)
 						 create_agg_path(root,
@@ -5105,7 +5111,8 @@ create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
 				 * cheapest input path).
 				 */
 				if (input_path != cheapest_input_path &&
-					(presorted_keys == 0 || !enable_incremental_sort))
+					(presorted_keys == 0 ||
+					 !REL_CAN_USE_PATH(distinct_rel, IncrementalSort)))
 					continue;
 
 				/*
@@ -5113,7 +5120,8 @@ create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
 				 * We'll just do a sort if there are no presorted keys and an
 				 * incremental sort when there are presorted keys.
 				 */
-				if (presorted_keys == 0 || !enable_incremental_sort)
+				if (presorted_keys == 0 ||
+					!REL_CAN_USE_PATH(distinct_rel, IncrementalSort))
 					sorted_path = (Path *) create_sort_path(root,
 															distinct_rel,
 															input_path,
@@ -5177,14 +5185,14 @@ create_final_distinct_paths(PlannerInfo *root, RelOptInfo *input_rel,
 	 * die trying.  If we do have other choices, there are two things that
 	 * should prevent selection of hashing: if the query uses DISTINCT ON
 	 * (because it won't really have the expected behavior if we hash), or if
-	 * enable_hashagg is off.
+	 * hash aggregation is disabled.
 	 *
 	 * Note: grouping_is_hashable() is much more expensive to check than the
 	 * other gating conditions, so we want to do it last.
 	 */
 	if (distinct_rel->pathlist == NIL)
 		allow_hash = true;		/* we have no alternatives */
-	else if (parse->hasDistinctOn || !enable_hashagg)
+	else if (parse->hasDistinctOn || !REL_CAN_USE_PATH(distinct_rel, HashAgg))
 		allow_hash = false;		/* policy-based decision not to hash */
 	else
 		allow_hash = true;		/* default */
@@ -5277,7 +5285,8 @@ create_ordered_paths(PlannerInfo *root,
 			 * input path).
 			 */
 			if (input_path != cheapest_input_path &&
-				(presorted_keys == 0 || !enable_incremental_sort))
+				(presorted_keys == 0 ||
+				 !REL_CAN_USE_PATH(ordered_rel, IncrementalSort)))
 				continue;
 
 			/*
@@ -5285,7 +5294,8 @@ create_ordered_paths(PlannerInfo *root,
 			 * We'll just do a sort if there are no presorted keys and an
 			 * incremental sort when there are presorted keys.
 			 */
-			if (presorted_keys == 0 || !enable_incremental_sort)
+			if (presorted_keys == 0 ||
+				!REL_CAN_USE_PATH(ordered_rel, IncrementalSort))
 				sorted_path = (Path *) create_sort_path(root,
 														ordered_rel,
 														input_path,
@@ -5349,7 +5359,8 @@ create_ordered_paths(PlannerInfo *root,
 			 * partial path).
 			 */
 			if (input_path != cheapest_partial_path &&
-				(presorted_keys == 0 || !enable_incremental_sort))
+				(presorted_keys == 0 ||
+				 !REL_CAN_USE_PATH(ordered_rel, IncrementalSort)))
 				continue;
 
 			/*
@@ -5357,7 +5368,8 @@ create_ordered_paths(PlannerInfo *root,
 			 * We'll just do a sort if there are no presorted keys and an
 			 * incremental sort when there are presorted keys.
 			 */
-			if (presorted_keys == 0 || !enable_incremental_sort)
+			if (presorted_keys == 0 ||
+				!REL_CAN_USE_PATH(ordered_rel, IncrementalSort))
 				sorted_path = (Path *) create_sort_path(root,
 														ordered_rel,
 														input_path,
@@ -6747,7 +6759,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
 
 	/* Estimate the cost of seq scan + sort */
 	seqScanPath = create_seqscan_path(root, rel, NULL, 0);
-	cost_sort(&seqScanAndSortPath, root, NIL,
+	cost_sort(&seqScanAndSortPath, root, rel, NIL,
 			  seqScanPath->disabled_nodes,
 			  seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
 			  comparisonCost, maintenance_work_mem, -1.0);
@@ -6931,7 +6943,8 @@ make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
 		 * disabled unless it's the cheapest input path).
 		 */
 		if (path != cheapest_path &&
-			(presorted_keys == 0 || !enable_incremental_sort))
+			(presorted_keys == 0 ||
+			 !REL_CAN_USE_PATH(rel, IncrementalSort)))
 			return NULL;
 
 		/*
@@ -6939,7 +6952,8 @@ make_ordered_path(PlannerInfo *root, RelOptInfo *rel, Path *path,
 		 * just do a sort if there are no presorted keys and an incremental
 		 * sort when there are presorted keys.
 		 */
-		if (presorted_keys == 0 || !enable_incremental_sort)
+		if (presorted_keys == 0 ||
+			!REL_CAN_USE_PATH(rel, IncrementalSort))
 			path = (Path *) create_sort_path(root,
 											 rel,
 											 path,
@@ -7540,7 +7554,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
 		 * disabled unless it's the cheapest input path).
 		 */
 		if (path != cheapest_partial_path &&
-			(presorted_keys == 0 || !enable_incremental_sort))
+			(presorted_keys == 0 ||
+			 !REL_CAN_USE_PATH(rel, IncrementalSort)))
 			continue;
 
 		/*
@@ -7548,7 +7563,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
 		 * just do a sort if there are no presorted keys and an incremental
 		 * sort when there are presorted keys.
 		 */
-		if (presorted_keys == 0 || !enable_incremental_sort)
+		if (presorted_keys == 0 ||
+			!REL_CAN_USE_PATH(rel, IncrementalSort))
 			path = (Path *) create_sort_path(root, rel, path,
 											 groupby_pathkeys,
 											 -1.0);
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 6d003cc8e5..6dbcf6e0aa 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -525,13 +525,14 @@ build_subplan(PlannerInfo *root, Plan *plan, Path *path,
 		 * is pointless for a direct-correlated subplan, since we'd have to
 		 * recompute its results each time anyway.  For uncorrelated/undirect
 		 * correlated subplans, we add Material unless the subplan's top plan
-		 * node would materialize its output anyway.  Also, if enable_material
-		 * is false, then the user does not want us to materialize anything
+		 * node would materialize its output anyway.  Also, if Materialize is
+		 * disabled, then the user does not want us to materialize anything
 		 * unnecessarily, so we don't.
 		 */
-		else if (splan->parParam == NIL && enable_material &&
+		else if (splan->parParam == NIL &&
+				 REL_CAN_USE_PATH(path->parent, Material) &&
 				 !ExecMaterializesOutput(nodeTag(plan)))
-			plan = materialize_finished_plan(plan);
+			plan = materialize_finished_plan(plan, path->parent);
 
 		result = (Node *) splan;
 		isInitPlan = false;
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index a0baf6d4a1..f8fd5db66b 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -587,7 +587,8 @@ build_setop_child_paths(PlannerInfo *root, RelOptInfo *rel,
 			 * input path).
 			 */
 			if (subpath != cheapest_input_path &&
-				(presorted_keys == 0 || !enable_incremental_sort))
+				(presorted_keys == 0 ||
+				 !REL_CAN_USE_PATH(final_rel, IncrementalSort)))
 				continue;
 
 			/*
@@ -595,7 +596,8 @@ build_setop_child_paths(PlannerInfo *root, RelOptInfo *rel,
 			 * We'll just do a sort if there are no presorted keys and an
 			 * incremental sort when there are presorted keys.
 			 */
-			if (presorted_keys == 0 || !enable_incremental_sort)
+			if (presorted_keys == 0 ||
+				!REL_CAN_USE_PATH(final_rel, IncrementalSort))
 				subpath = (Path *) create_sort_path(rel->subroot,
 													final_rel,
 													subpath,
@@ -867,7 +869,7 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root,
 		 * the children.  The precise formula is just a guess; see
 		 * add_paths_to_append_rel.
 		 */
-		if (enable_parallel_append)
+		if (REL_CAN_USE_PATH(result_rel, ParallelAppend))
 		{
 			parallel_workers = Max(parallel_workers,
 								   pg_leftmost_one_pos32(list_length(partial_pathlist)) + 1);
@@ -879,7 +881,8 @@ generate_union_paths(SetOperationStmt *op, PlannerInfo *root,
 		papath = (Path *)
 			create_append_path(root, result_rel, NIL, partial_pathlist,
 							   NIL, NULL, parallel_workers,
-							   enable_parallel_append, -1);
+							   REL_CAN_USE_PATH(result_rel, ParallelAppend),
+							   -1);
 		gpath = (Path *)
 			create_gather_path(root, result_rel, papath,
 							   result_rel->reltarget, NULL, NULL);
@@ -1319,8 +1322,8 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 				 errmsg("could not implement %s", construct),
 				 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
 
-	/* Prefer sorting when enable_hashagg is off */
-	if (!enable_hashagg)
+	/* Prefer sorting when hash aggregation is disabled */
+	if (!REL_CAN_USE_PATH(input_path->parent, HashAgg))
 		return false;
 
 	/*
@@ -1343,7 +1346,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 	 * These path variables are dummies that just hold cost fields; we don't
 	 * make actual Paths for these steps.
 	 */
-	cost_agg(&hashed_p, root, AGG_HASHED, NULL,
+	cost_agg(&hashed_p, root, input_path->parent, AGG_HASHED, NULL,
 			 numGroupCols, dNumGroups,
 			 NIL,
 			 input_path->disabled_nodes,
@@ -1358,7 +1361,8 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
 	sorted_p.startup_cost = input_path->startup_cost;
 	sorted_p.total_cost = input_path->total_cost;
 	/* XXX cost_sort doesn't actually look at pathkeys, so just pass NIL */
-	cost_sort(&sorted_p, root, NIL, sorted_p.disabled_nodes,
+	cost_sort(&sorted_p, root, input_path->parent, NIL,
+			  sorted_p.disabled_nodes,
 			  sorted_p.total_cost,
 			  input_path->rows, input_path->pathtarget->width,
 			  0.0, work_mem, -1.0);
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index fc97bf6ee2..77ed747437 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1537,6 +1537,7 @@ create_merge_append_path(PlannerInfo *root,
 
 			cost_sort(&sort_path,
 					  root,
+					  rel,
 					  pathkeys,
 					  subpath->disabled_nodes,
 					  subpath->total_cost,
@@ -1649,7 +1650,7 @@ create_material_path(RelOptInfo *rel, Path *subpath)
 
 	pathnode->subpath = subpath;
 
-	cost_material(&pathnode->path,
+	cost_material(&pathnode->path, rel,
 				  subpath->disabled_nodes,
 				  subpath->startup_cost,
 				  subpath->total_cost,
@@ -1698,7 +1699,7 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
 	pathnode->est_entries = 0;
 
-	/* we should not generate this path type when enable_memoize=false */
+	/* we should not generate this path type when memoize is disabled */
-	Assert(enable_memoize);
+	Assert(REL_CAN_USE_PATH(rel, Memoize));
 	pathnode->path.disabled_nodes = subpath->disabled_nodes;
 
 	/*
@@ -1866,7 +1867,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
 		/*
 		 * Estimate cost for sort+unique implementation
 		 */
-		cost_sort(&sort_path, root, NIL,
+		cost_sort(&sort_path, root, rel, NIL,
 				  subpath->disabled_nodes,
 				  subpath->total_cost,
 				  rel->rows,
@@ -1901,7 +1902,7 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
 			sjinfo->semi_can_hash = false;
 		}
 		else
-			cost_agg(&agg_path, root,
+			cost_agg(&agg_path, root, rel,
 					 AGG_HASHED, NULL,
 					 numCols, pathnode->path.rows,
 					 NIL,
@@ -3101,7 +3102,7 @@ create_sort_path(PlannerInfo *root,
 
 	pathnode->subpath = subpath;
 
-	cost_sort(&pathnode->path, root, pathkeys,
+	cost_sort(&pathnode->path, root, rel, pathkeys,
 			  subpath->disabled_nodes,
 			  subpath->total_cost,
 			  subpath->rows,
@@ -3288,7 +3289,7 @@ create_agg_path(PlannerInfo *root,
 	pathnode->groupClause = groupClause;
 	pathnode->qual = qual;
 
-	cost_agg(&pathnode->path, root,
+	cost_agg(&pathnode->path, root, rel,
 			 aggstrategy, aggcosts,
 			 list_length(groupClause), numGroups,
 			 qual,
@@ -3395,7 +3396,7 @@ create_groupingsets_path(PlannerInfo *root,
 		 */
 		if (is_first)
 		{
-			cost_agg(&pathnode->path, root,
+			cost_agg(&pathnode->path, root, rel,
 					 aggstrategy,
 					 agg_costs,
 					 numGroupCols,
@@ -3421,7 +3422,7 @@ create_groupingsets_path(PlannerInfo *root,
 				 * Account for cost of aggregation, but don't charge input
 				 * cost again
 				 */
-				cost_agg(&agg_path, root,
+				cost_agg(&agg_path, root, rel,
 						 rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
 						 agg_costs,
 						 numGroupCols,
@@ -3436,7 +3437,7 @@ create_groupingsets_path(PlannerInfo *root,
 			else
 			{
 				/* Account for cost of sort, but don't charge input cost again */
-				cost_sort(&sort_path, root, NIL, 0,
+				cost_sort(&sort_path, root, rel, NIL, 0,
 						  0.0,
 						  subpath->rows,
 						  subpath->pathtarget->width,
@@ -3446,7 +3447,7 @@ create_groupingsets_path(PlannerInfo *root,
 
 				/* Account for cost of aggregation */
 
-				cost_agg(&agg_path, root,
+				cost_agg(&agg_path, root, rel,
 						 AGG_SORTED,
 						 agg_costs,
 						 numGroupCols,
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 78a3cfafde..3b9a3746ee 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -570,7 +570,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
 	/*
 	 * Allow a plugin to editorialize on the info we obtained from the
 	 * catalogs.  Actions might include altering the assumed relation size,
-	 * removing an index, or adding a hypothetical index to the indexlist.
+	 * removing an index, adding a hypothetical index to the indexlist, or
+	 * changing the path type mask.
 	 */
 	if (get_relation_info_hook)
 		(*get_relation_info_hook) (root, relationObjectId, inhparent, rel);
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index d7266e4cdb..88e468795a 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -211,6 +211,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptInfo *parent)
 	rel->consider_startup = (root->tuple_fraction > 0);
 	rel->consider_param_startup = false;	/* might get changed later */
 	rel->consider_parallel = false; /* might get changed later */
+	rel->path_type_mask = default_path_type_mask;
 	rel->reltarget = create_empty_pathtarget();
 	rel->pathlist = NIL;
 	rel->ppilist = NIL;
@@ -707,6 +708,7 @@ build_join_rel(PlannerInfo *root,
 	joinrel->consider_startup = (root->tuple_fraction > 0);
 	joinrel->consider_param_startup = false;
 	joinrel->consider_parallel = false;
+	joinrel->path_type_mask = default_path_type_mask;
 	joinrel->reltarget = create_empty_pathtarget();
 	joinrel->pathlist = NIL;
 	joinrel->ppilist = NIL;
@@ -900,6 +902,7 @@ build_child_join_rel(PlannerInfo *root, RelOptInfo *outer_rel,
 	joinrel->consider_startup = (root->tuple_fraction > 0);
 	joinrel->consider_param_startup = false;
 	joinrel->consider_parallel = false;
+	joinrel->path_type_mask = default_path_type_mask;
 	joinrel->reltarget = create_empty_pathtarget();
 	joinrel->pathlist = NIL;
 	joinrel->ppilist = NIL;
@@ -1484,6 +1487,7 @@ fetch_upper_rel(PlannerInfo *root, UpperRelationKind kind, Relids relids)
 	upperrel->consider_startup = (root->tuple_fraction > 0);
 	upperrel->consider_param_startup = false;
 	upperrel->consider_parallel = false;	/* might get changed later */
+	upperrel->path_type_mask = default_path_type_mask;
 	upperrel->reltarget = create_empty_pathtarget();
 	upperrel->pathlist = NIL;
 	upperrel->cheapest_startup_path = NULL;
@@ -2010,7 +2014,7 @@ build_joinrel_partition_info(PlannerInfo *root,
 	PartitionScheme part_scheme;
 
 	/* Nothing to do if partitionwise join technique is disabled. */
-	if (!enable_partitionwise_join)
+	if (!REL_CAN_USE_PATH(joinrel, PartitionwiseJoin))
 	{
 		Assert(!IS_PARTITIONED_REL(joinrel));
 		return;
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index af227b1f24..1bd21bbe34 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -55,6 +55,7 @@
 #include "optimizer/geqo.h"
 #include "optimizer/optimizer.h"
 #include "optimizer/paths.h"
+#include "optimizer/pathnode.h"
 #include "optimizer/planmain.h"
 #include "parser/parse_expr.h"
 #include "parser/parser.h"
@@ -777,7 +778,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_seqscan,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_seqscan_assign_hook, NULL
 	},
 	{
 		{"enable_indexscan", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -787,7 +788,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_indexscan,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_indexscan_assign_hook, NULL
 	},
 	{
 		{"enable_indexonlyscan", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -797,7 +798,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_indexonlyscan,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_indexonlyscan_assign_hook, NULL
 	},
 	{
 		{"enable_bitmapscan", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -807,7 +808,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_bitmapscan,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_bitmapscan_assign_hook, NULL
 	},
 	{
 		{"enable_tidscan", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -817,7 +818,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_tidscan,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_tidscan_assign_hook, NULL
 	},
 	{
 		{"enable_sort", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -827,7 +828,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_sort,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_sort_assign_hook, NULL
 	},
 	{
 		{"enable_incremental_sort", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -837,7 +838,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_incremental_sort,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_incremental_sort_assign_hook, NULL
 	},
 	{
 		{"enable_hashagg", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -847,7 +848,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_hashagg,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_hashagg_assign_hook, NULL
 	},
 	{
 		{"enable_material", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -857,7 +858,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_material,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_material_assign_hook, NULL
 	},
 	{
 		{"enable_memoize", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -867,7 +868,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_memoize,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_memoize_assign_hook, NULL
 	},
 	{
 		{"enable_nestloop", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -877,7 +878,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_nestloop,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_nestloop_assign_hook, NULL
 	},
 	{
 		{"enable_mergejoin", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -887,7 +888,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_mergejoin,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_mergejoin_assign_hook, NULL
 	},
 	{
 		{"enable_hashjoin", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -897,7 +898,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_hashjoin,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_hashjoin_assign_hook, NULL
 	},
 	{
 		{"enable_gathermerge", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -907,7 +908,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_gathermerge,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_gathermerge_assign_hook, NULL
 	},
 	{
 		{"enable_partitionwise_join", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -917,7 +918,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_partitionwise_join,
 		false,
-		NULL, NULL, NULL
+		NULL, enable_partitionwise_join_assign_hook, NULL
 	},
 	{
 		{"enable_partitionwise_aggregate", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -927,7 +928,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_partitionwise_aggregate,
 		false,
-		NULL, NULL, NULL
+		NULL, enable_partitionwise_aggregate_assign_hook, NULL
 	},
 	{
 		{"enable_parallel_append", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -937,7 +938,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_parallel_append,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_parallel_append_assign_hook, NULL
 	},
 	{
 		{"enable_parallel_hash", PGC_USERSET, QUERY_TUNING_METHOD,
@@ -947,7 +948,7 @@ struct config_bool ConfigureNamesBool[] =
 		},
 		&enable_parallel_hash,
 		true,
-		NULL, NULL, NULL
+		NULL, enable_parallel_hash_assign_hook, NULL
 	},
 	{
 		{"enable_partition_pruning", PGC_USERSET, QUERY_TUNING_METHOD,
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 540d021592..bbff482cec 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -80,6 +80,26 @@ typedef enum UpperRelationKind
 	/* NB: UPPERREL_FINAL must be last enum entry; it's used to size arrays */
 } UpperRelationKind;
 
+#define PathTypeBitmapScan			0x00000001
+#define PathTypeGatherMerge			0x00000002
+#define PathTypeHashAgg				0x00000004
+#define PathTypeHashJoin			0x00000008
+#define PathTypeIncrementalSort		0x00000010
+#define PathTypeIndexScan			0x00000020
+#define PathTypeIndexOnlyScan		0x00000040
+#define PathTypeMaterial			0x00000080
+#define PathTypeMemoize				0x00000100
+#define PathTypeMergeJoin			0x00000200
+#define PathTypeNestLoop			0x00000400
+#define PathTypeParallelAppend		0x00000800
+#define PathTypeParallelHash		0x00001000
+#define PathTypePartitionwiseJoin	0x00002000
+#define PathTypePartitionwiseAggregate	0x00004000
+#define PathTypeSeqScan				0x00008000
+#define PathTypeSort				0x00010000
+#define PathTypeTIDScan				0x00020000
+#define PathTypeMaskAll				0x0003FFFF
+
 /*----------
  * PlannerGlobal
  *		Global information for planning/optimization
@@ -879,6 +899,8 @@ typedef struct RelOptInfo
 	bool		consider_param_startup;
 	/* consider parallel paths? */
 	bool		consider_parallel;
+	/* path type mask for this rel */
+	uint32		path_type_mask;
 
 	/*
 	 * default result targetlist for Paths scanning this relation; list of
@@ -1065,6 +1087,13 @@ typedef struct RelOptInfo
 	((rel)->part_scheme && (rel)->boundinfo && (rel)->nparts > 0 && \
 	 (rel)->part_rels && (rel)->partexprs && (rel)->nullable_partexprs)
 
+/*
+ * Convenience macro to test whether a certain PathTypeXXX bit is set
+ * in a relation's path_type_mask.
+ */
+#define REL_CAN_USE_PATH(rel, type) \
+	(((rel)->path_type_mask & PathType##type) != 0)
+
 /*
  * IndexOptInfo
  *		Per-index information for planning/optimization
diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h
index 854a782944..2cd1d8c34c 100644
--- a/src/include/optimizer/cost.h
+++ b/src/include/optimizer/cost.h
@@ -71,6 +71,26 @@ extern PGDLLIMPORT bool enable_partition_pruning;
 extern PGDLLIMPORT bool enable_presorted_aggregate;
 extern PGDLLIMPORT bool enable_async_append;
 extern PGDLLIMPORT int constraint_exclusion;
+extern PGDLLIMPORT uint32 default_path_type_mask;
+
+extern void enable_bitmapscan_assign_hook(bool newval, void *extra);
+extern void enable_gathermerge_assign_hook(bool newval, void *extra);
+extern void enable_hashagg_assign_hook(bool newval, void *extra);
+extern void enable_hashjoin_assign_hook(bool newval, void *extra);
+extern void enable_incremental_sort_assign_hook(bool newval, void *extra);
+extern void enable_indexscan_assign_hook(bool newval, void *extra);
+extern void enable_indexonlyscan_assign_hook(bool newval, void *extra);
+extern void enable_material_assign_hook(bool newval, void *extra);
+extern void enable_memoize_assign_hook(bool newval, void *extra);
+extern void enable_mergejoin_assign_hook(bool newval, void *extra);
+extern void enable_nestloop_assign_hook(bool newval, void *extra);
+extern void enable_parallel_append_assign_hook(bool newval, void *extra);
+extern void enable_parallel_hash_assign_hook(bool newval, void *extra);
+extern void enable_partitionwise_join_assign_hook(bool newval, void *extra);
+extern void enable_partitionwise_aggregate_assign_hook(bool newval, void *extra);
+extern void enable_seqscan_assign_hook(bool newval, void *extra);
+extern void enable_sort_assign_hook(bool newval, void *extra);
+extern void enable_tidscan_assign_hook(bool newval, void *extra);
 
 extern double index_pages_fetched(double tuples_fetched, BlockNumber pages,
 								  double index_pages, PlannerInfo *root);
@@ -107,7 +127,7 @@ extern void cost_namedtuplestorescan(Path *path, PlannerInfo *root,
 extern void cost_resultscan(Path *path, PlannerInfo *root,
 							RelOptInfo *baserel, ParamPathInfo *param_info);
 extern void cost_recursive_union(Path *runion, Path *nrterm, Path *rterm);
-extern void cost_sort(Path *path, PlannerInfo *root,
+extern void cost_sort(Path *path, PlannerInfo *root, RelOptInfo *rel,
 					  List *pathkeys, int disabled_nodes,
 					  Cost input_cost, double tuples, int width,
 					  Cost comparison_cost, int sort_mem,
@@ -124,11 +144,11 @@ extern void cost_merge_append(Path *path, PlannerInfo *root,
 							  int input_disabled_nodes,
 							  Cost input_startup_cost, Cost input_total_cost,
 							  double tuples);
-extern void cost_material(Path *path,
+extern void cost_material(Path *path, RelOptInfo *rel,
 						  int input_disabled_nodes,
 						  Cost input_startup_cost, Cost input_total_cost,
 						  double tuples, int width);
-extern void cost_agg(Path *path, PlannerInfo *root,
+extern void cost_agg(Path *path, PlannerInfo *root, RelOptInfo *rel,
 					 AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
 					 int numGroupCols, double numGroups,
 					 List *quals,
@@ -148,6 +168,7 @@ extern void cost_group(Path *path, PlannerInfo *root,
 					   double input_tuples);
 extern void initial_cost_nestloop(PlannerInfo *root,
 								  JoinCostWorkspace *workspace,
+								  RelOptInfo *joinrel,
 								  JoinType jointype,
 								  Path *outer_path, Path *inner_path,
 								  JoinPathExtraData *extra);
@@ -156,6 +177,7 @@ extern void final_cost_nestloop(PlannerInfo *root, NestPath *path,
 								JoinPathExtraData *extra);
 extern void initial_cost_mergejoin(PlannerInfo *root,
 								   JoinCostWorkspace *workspace,
+								   RelOptInfo *joinrel,
 								   JoinType jointype,
 								   List *mergeclauses,
 								   Path *outer_path, Path *inner_path,
@@ -166,6 +188,7 @@ extern void final_cost_mergejoin(PlannerInfo *root, MergePath *path,
 								 JoinPathExtraData *extra);
 extern void initial_cost_hashjoin(PlannerInfo *root,
 								  JoinCostWorkspace *workspace,
+								  RelOptInfo *joinrel,
 								  JoinType jointype,
 								  List *hashclauses,
 								  Path *outer_path, Path *inner_path,
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index aafc173792..07623eff79 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -45,7 +45,7 @@ extern ForeignScan *make_foreignscan(List *qptlist, List *qpqual,
 									 Plan *outer_plan);
 extern Plan *change_plan_targetlist(Plan *subplan, List *tlist,
 									bool tlist_parallel_safe);
-extern Plan *materialize_finished_plan(Plan *subplan);
+extern Plan *materialize_finished_plan(Plan *subplan, RelOptInfo *rel);
 extern bool is_projection_capable_path(Path *path);
 extern bool is_projection_capable_plan(Plan *plan);
 
-- 
2.39.3 (Apple Git-145)

