maropu commented on a change in pull request #32084:
URL: https://github.com/apache/spark/pull/32084#discussion_r610642147
##########
File path: sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
##########
@@ -1575,4 +1575,51 @@ class AdaptiveQueryExecSuite
checkNoCoalescePartitions(df.sort($"key"), ENSURE_REQUIREMENTS)
}
}
+
+ test("SPARK-34980: Support coalesce partition through union") {
+ def checkResultPartition(
+ df: Dataset[Row], shuffleReaderNumber: Int, partitionNumber: Int): Unit
= {
+ df.collect()
+ assert(
+ collect(df.queryExecution.executedPlan) {
+ case s: CustomShuffleReaderExec => s
+ }.size === shuffleReaderNumber
+ )
+ assert(df.rdd.partitions.length === partitionNumber)
+ }
+
+ withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
+ SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
+ SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "1048576",
+ SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
+ SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
+ val df1 = spark.sparkContext.parallelize(
+ (1 to 10).map(i => TestData(i, i.toString)), 2).toDF()
+ val df2 = spark.sparkContext.parallelize(
+ (1 to 10).map(i => TestData(i, i.toString)), 4).toDF()
+
+ // positive test
+ checkResultPartition(
+ df1.groupBy("key").count().unionAll(df2),
+ 1,
+ 1 + 4)
+
+ checkResultPartition(
+ df1.groupBy("key").count().unionAll(df2).unionAll(df1),
+ 1,
+ 1 + 4 + 2)
+
+ checkResultPartition(
+
df1.groupBy("key").count().unionAll(df2).unionAll(df1.groupBy("key").count()),
+ 2,
+ 1 + 4 + 1)
Review comment:
IIUC, these physical plans each have a single union exec because of
`CombineUnions`, right? Could you add tests for physical plans having
multiple union execs?
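
A minimal sketch of the kind of follow-up test being asked for, reusing `df1`, `df2`, and the `collect` helper from the diff above. It assumes that a `distinct()` between the two unions (rewritten to an aggregate) keeps `CombineUnions` from flattening them, so the executed plan should retain two separate `UnionExec` nodes; the assertion is illustrative, not taken from the PR.

```scala
// Hypothetical test: the intermediate distinct() sits between the two unions,
// so CombineUnions cannot merge them into a single UnionExec.
val multiUnionDf = df1.groupBy("key").count()
  .unionAll(df2)
  .distinct()
  .unionAll(df1.groupBy("key").count())
multiUnionDf.collect()
assert(collect(multiUnionDf.queryExecution.executedPlan) {
  case u: UnionExec => u
}.size === 2)
```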
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/CoalesceShufflePartitions.scala
##########
@@ -35,14 +35,25 @@ case class CoalesceShufflePartitions(session: SparkSession) extends CustomShuffl
     if (!conf.coalesceShufflePartitionsEnabled) {
       return plan
     }
-    if (!plan.collectLeaves().forall(_.isInstanceOf[QueryStageExec])
-        || plan.find(_.isInstanceOf[CustomShuffleReaderExec]).isDefined) {
-      // If not all leaf nodes are query stages, it's not safe to reduce the number of
-      // shuffle partitions, because we may break the assumption that all children of a spark plan
-      // have same number of output partitions.
-      return plan
+
+    if (canCoalescePartitions(plan)) {
+      coalescePartitions(plan)
+    } else {
+      plan.transformUp {
+        case u: UnionExec =>
+          u.withNewChildren(u.children.map { child =>
+            if (canCoalescePartitions(child) &&
+                child.find(_.isInstanceOf[UnionExec]).isEmpty) {
Review comment:
Do we still need the `child.find(_.isInstanceOf[UnionExec]).isEmpty` check?
It seems `canCoalescePartitions(child)` always returns false when
`child.find(_.isInstanceOf[UnionExec]).isEmpty` is false, doesn't it?
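
For context, a sketch of the shape of helper this question presumes, reconstructed only from the guard removed in the diff above; the PR's actual `canCoalescePartitions` is not shown here and may contain further conditions (e.g. on `UnionExec`), which is exactly what the redundancy question hinges on.

```scala
// Hypothetical reconstruction from the removed guard; not the PR's actual code.
private def canCoalescePartitions(plan: SparkPlan): Boolean = {
  plan.collectLeaves().forall(_.isInstanceOf[QueryStageExec]) &&
    plan.find(_.isInstanceOf[CustomShuffleReaderExec]).isEmpty
}
```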
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/CoalesceShufflePartitions.scala
##########
@@ -35,14 +35,25 @@ case class CoalesceShufflePartitions(session: SparkSession) extends CustomShuffl
     if (!conf.coalesceShufflePartitionsEnabled) {
       return plan
     }
-    if (!plan.collectLeaves().forall(_.isInstanceOf[QueryStageExec])
-        || plan.find(_.isInstanceOf[CustomShuffleReaderExec]).isDefined) {
-      // If not all leaf nodes are query stages, it's not safe to reduce the number of
-      // shuffle partitions, because we may break the assumption that all children of a spark plan
-      // have same number of output partitions.
-      return plan
+
+    if (canCoalescePartitions(plan)) {
+      coalescePartitions(plan)
+    } else {
+      plan.transformUp {
+        case u: UnionExec =>
Review comment:
Could you leave some comments about what this pattern is for?
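
A sketch of the sort of comment this branch could carry, mirroring the added lines visible in the diff above; the wording and the per-child bodies are illustrative, not taken from the PR.

```scala
plan.transformUp {
  // The plan as a whole is not safe to coalesce (e.g. not every leaf is a
  // query stage), but UnionExec does not require its children to have the
  // same number of output partitions, so each union child that does qualify
  // can still have its shuffle partitions coalesced independently.
  case u: UnionExec =>
    u.withNewChildren(u.children.map { child =>
      if (canCoalescePartitions(child) &&
          child.find(_.isInstanceOf[UnionExec]).isEmpty) {
        coalescePartitions(child)  // hypothetical: the actual per-child body is not shown in this diff
      } else {
        child
      }
    })
}
```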
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]