diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index cefec7b..0434a5a 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -465,7 +465,8 @@ build_minmax_path(PlannerInfo *root, MinMaxAggInfo *mminfo,
 	 * cheapest path.)
 	 */
 	sorted_path = apply_projection_to_path(subroot, final_rel, sorted_path,
-										   create_pathtarget(subroot, tlist));
+										   create_pathtarget(subroot, tlist),
+										   false);
 
 	/*
 	 * Determine cost to get just the first row of the presorted path.
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 07b925e..f1c3435 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -1500,6 +1500,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 		PathTarget *grouping_target;
 		PathTarget *scanjoin_target;
 		bool		have_grouping;
+		bool		scanjoin_target_parallel_safe = false;
 		WindowFuncLists *wflists = NULL;
 		List	   *activeWindows = NIL;
 		List	   *rollup_lists = NIL;
@@ -1729,6 +1730,11 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 		else
 			scanjoin_target = grouping_target;
 
+		/* Check whether the scan/join target is parallel-safe. */
+		if (current_rel->partial_pathlist &&
+			!has_parallel_hazard((Node *) scanjoin_target->exprs, false))
+			scanjoin_target_parallel_safe = true;
+
 		/*
 		 * Forcibly apply that target to all the Paths for the scan/join rel.
 		 *
@@ -1746,7 +1752,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 
 			Assert(subpath->param_info == NULL);
 			path = apply_projection_to_path(root, current_rel,
-											subpath, scanjoin_target);
+											subpath, scanjoin_target,
+											scanjoin_target_parallel_safe);
 			/* If we had to add a Result, path is different from subpath */
 			if (path != subpath)
 			{
@@ -1759,6 +1766,28 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 		}
 
 		/*
+		 * Likewise for any partial paths if the target is parallel safe,
+		 * although this case is simpler, since we don't track the cheapest
+		 * path.  If the scanjoin target is not safe, then we can't even
+		 * generate parallel paths for upper rels.
+		 */
+		if (scanjoin_target_parallel_safe)
+		{
+			foreach(lc, current_rel->partial_pathlist)
+			{
+				Path	   *subpath = (Path *) lfirst(lc);
+
+				Assert(subpath->param_info == NULL);
+				lfirst(lc) = (Path *) create_projection_path(root,
+															 current_rel,
+															 subpath,
+															 scanjoin_target);
+			}
+		}
+		else
+			current_rel->partial_pathlist = NIL;
+
+		/*
 		 * Save the various upper-rel PathTargets we just computed into
 		 * root->upper_targets[].  The core code doesn't use this, but it
 		 * provides a convenient place for extensions to get at the info.  For
@@ -4153,7 +4182,7 @@ create_ordered_paths(PlannerInfo *root,
 			/* Add projection step if needed */
 			if (path->pathtarget != target)
 				path = apply_projection_to_path(root, ordered_rel,
-												path, target);
+												path, target, false);
 
 			add_path(ordered_rel, path);
 		}
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 552b756..30975e0 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -325,7 +325,8 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
 									 refnames_tlist);
 
 		path = apply_projection_to_path(root, rel, path,
-										create_pathtarget(root, tlist));
+										create_pathtarget(root, tlist),
+										false);
 
 		/* Return the fully-fledged tlist to caller, too */
 		*pTargetList = tlist;
@@ -394,7 +395,8 @@ recurse_set_operations(Node *setOp, PlannerInfo *root,
 											path->parent,
 											path,
 											create_pathtarget(root,
-															  *pTargetList));
+															  *pTargetList),
+											false);
 		}
 		return path;
 	}
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 6b57de3..6d4901e 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -2217,12 +2217,14 @@ create_projection_path(PlannerInfo *root,
  * 'rel' is the parent relation associated with the result
  * 'path' is the path representing the source of data
  * 'target' is the PathTarget to be computed
+ * 'target_parallel' indicates that the target is known parallel-safe
  */
 Path *
 apply_projection_to_path(PlannerInfo *root,
 						 RelOptInfo *rel,
 						 Path *path,
-						 PathTarget *target)
+						 PathTarget *target,
+						 bool target_parallel)
 {
 	QualCost	oldcost;
 
@@ -2248,8 +2250,7 @@ apply_projection_to_path(PlannerInfo *root,
 	 * project. But if there is something that is not parallel-safe in the
 	 * target expressions, then we can't.
 	 */
-	if (IsA(path, GatherPath) &&
-		!has_parallel_hazard((Node *) target->exprs, false))
+	if (IsA(path, GatherPath) && target_parallel)
 	{
 		GatherPath *gpath = (GatherPath *) path;
 
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 5de4c34..586ecdd 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -143,7 +143,8 @@ extern ProjectionPath *create_projection_path(PlannerInfo *root,
 extern Path *apply_projection_to_path(PlannerInfo *root,
 						 RelOptInfo *rel,
 						 Path *path,
-						 PathTarget *target);
+						 PathTarget *target,
+						 bool target_parallel);
 extern SortPath *create_sort_path(PlannerInfo *root,
 				 RelOptInfo *rel,
 				 Path *subpath,
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index b51c20c..10e3ec0 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -17,7 +17,7 @@ set parallel_setup_cost=0;
 set parallel_tuple_cost=0;
 set max_parallel_workers_per_gather=4;
 explain (costs off)
-  select count(*) from a_star;
+  select count(*) from a_star where aa < 1000;
                      QUERY PLAN                      
 -----------------------------------------------------
  Finalize Aggregate
@@ -26,17 +26,23 @@ explain (costs off)
          ->  Partial Aggregate
                ->  Append
                      ->  Parallel Seq Scan on a_star
+                           Filter: (aa < 1000)
                      ->  Parallel Seq Scan on b_star
+                           Filter: (aa < 1000)
                      ->  Parallel Seq Scan on c_star
+                           Filter: (aa < 1000)
                      ->  Parallel Seq Scan on d_star
+                           Filter: (aa < 1000)
                      ->  Parallel Seq Scan on e_star
+                           Filter: (aa < 1000)
                      ->  Parallel Seq Scan on f_star
-(11 rows)
+                           Filter: (aa < 1000)
+(17 rows)
 
-select count(*) from a_star;
+select count(*) from a_star where aa < 1000;
  count 
 -------
-    50
+    26
 (1 row)
 
 -- test that parallel_restricted function doesn't run in worker
@@ -78,6 +84,38 @@ select parallel_restricted(unique1) from tenk1
                 9912
 (15 rows)
 
+-- test parallel plan when group by expression is in target list.
+explain (costs off)
+	select length(stringu1) from tenk1 group by length(stringu1);
+                    QUERY PLAN                     
+---------------------------------------------------
+ Finalize HashAggregate
+   Group Key: (length((stringu1)::text))
+   ->  Gather
+         Workers Planned: 4
+         ->  Partial HashAggregate
+               Group Key: length((stringu1)::text)
+               ->  Parallel Seq Scan on tenk1
+(7 rows)
+
+select length(stringu1) from tenk1 group by length(stringu1);
+ length 
+--------
+      6
+(1 row)
+
+-- test that parallel plan for aggregates is not selected when
+-- target list contains parallel restricted clause.
+explain (costs off)
+	select  sum(parallel_restricted(unique1)) from tenk1
+	group by(parallel_restricted(unique1));
+                     QUERY PLAN                     
+----------------------------------------------------
+ HashAggregate
+   Group Key: parallel_restricted(unique1)
+   ->  Index Only Scan using tenk1_unique1 on tenk1
+(3 rows)
+
 set force_parallel_mode=1;
 explain (costs off)
   select stringu1::int2 from tenk1 where unique1 = 1;
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index 22dfb18..1048a65 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -21,8 +21,8 @@ set parallel_tuple_cost=0;
 set max_parallel_workers_per_gather=4;
 
 explain (costs off)
-  select count(*) from a_star;
-select count(*) from a_star;
+  select count(*) from a_star where aa < 1000;
+select count(*) from a_star where aa < 1000;
 
 -- test that parallel_restricted function doesn't run in worker
 alter table tenk1 set (parallel_workers = 4);
@@ -32,6 +32,17 @@ select parallel_restricted(unique1) from tenk1
 select parallel_restricted(unique1) from tenk1
   where stringu1 = 'GRAAAA' order by 1;
 
+-- test parallel plan when group by expression is in target list.
+explain (costs off)
+	select length(stringu1) from tenk1 group by length(stringu1);
+select length(stringu1) from tenk1 group by length(stringu1);
+
+-- test that parallel plan for aggregates is not selected when
+-- target list contains parallel restricted clause.
+explain (costs off)
+	select  sum(parallel_restricted(unique1)) from tenk1
+	group by(parallel_restricted(unique1));
+
 set force_parallel_mode=1;
 
 explain (costs off)
