cloud-fan commented on a change in pull request #32084:
URL: https://github.com/apache/spark/pull/32084#discussion_r717712983
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/CoalesceShufflePartitionsSuite.scala
##########
@@ -412,12 +412,10 @@ class CoalesceShufflePartitionsSuite extends
SparkFunSuite with BeforeAndAfterAl
val finalPlan = resultDf.queryExecution.executedPlan
.asInstanceOf[AdaptiveSparkPlanExec].executedPlan
- // As the pre-shuffle partition number are different, we will skip
reducing
- // the shuffle partition numbers.
Review comment:
let's update the comment
```
// Shuffle partition coalescing of the join is performed independently of the
// non-grouping aggregate on the other side of the union.
```
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
##########
@@ -1705,6 +1705,91 @@ class AdaptiveQueryExecSuite
}
}
+ test("SPARK-34980: Support coalesce partition through union") {
+ def checkResultPartition(
+ df: Dataset[Row],
+ unionNumber: Int,
+ shuffleReaderNumber: Int,
+ partitionNumber: Int): Unit = {
+ df.collect()
+ assert(collect(df.queryExecution.executedPlan) {
+ case u: UnionExec => u
+ }.size == unionNumber)
+ assert(collect(df.queryExecution.executedPlan) {
+ case r: AQEShuffleReadExec => r
+ }.size === shuffleReaderNumber)
+ assert(df.rdd.partitions.length === partitionNumber)
+ }
+
+ Seq(true, false).foreach { combineUnionEnabled =>
+ val combineUnionConfig = if (combineUnionEnabled) {
+ "" -> ""
+ } else {
+ SQLConf.OPTIMIZER_EXCLUDED_RULES.key ->
+ "org.apache.spark.sql.catalyst.optimizer.CombineUnions"
+ }
+ // advisory partition size 1048576 has no special meaning, just a big
enough value
+ withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
+ SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
+ SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "1048576",
+ SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
+ SQLConf.SHUFFLE_PARTITIONS.key -> "10",
+ combineUnionConfig) {
+ withTempView("t1", "t2") {
+ spark.sparkContext.parallelize((1 to 10).map(i => TestData(i,
i.toString)), 2)
+ .toDF().createOrReplaceTempView("t1")
+ spark.sparkContext.parallelize((1 to 10).map(i => TestData(i,
i.toString)), 4)
+ .toDF().createOrReplaceTempView("t2")
+
+ // positive test that could be coalesced
+ checkResultPartition(
+ sql("""
+ |SELECT key, count(*) FROM t1 GROUP BY key
+ |UNION ALL
+ |SELECT * FROM t2
+ """.stripMargin),
+ if (combineUnionEnabled) 1 else 1,
Review comment:
```suggestion
1
```
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
##########
@@ -1705,6 +1705,91 @@ class AdaptiveQueryExecSuite
}
}
+ test("SPARK-34980: Support coalesce partition through union") {
+ def checkResultPartition(
+ df: Dataset[Row],
+ unionNumber: Int,
+ shuffleReaderNumber: Int,
+ partitionNumber: Int): Unit = {
+ df.collect()
+ assert(collect(df.queryExecution.executedPlan) {
+ case u: UnionExec => u
+ }.size == unionNumber)
+ assert(collect(df.queryExecution.executedPlan) {
+ case r: AQEShuffleReadExec => r
+ }.size === shuffleReaderNumber)
+ assert(df.rdd.partitions.length === partitionNumber)
+ }
+
+ Seq(true, false).foreach { combineUnionEnabled =>
+ val combineUnionConfig = if (combineUnionEnabled) {
+ "" -> ""
+ } else {
+ SQLConf.OPTIMIZER_EXCLUDED_RULES.key ->
+ "org.apache.spark.sql.catalyst.optimizer.CombineUnions"
+ }
+ // advisory partition size 1048576 has no special meaning, just a big
enough value
+ withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
+ SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
+ SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "1048576",
+ SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
+ SQLConf.SHUFFLE_PARTITIONS.key -> "10",
+ combineUnionConfig) {
+ withTempView("t1", "t2") {
+ spark.sparkContext.parallelize((1 to 10).map(i => TestData(i,
i.toString)), 2)
+ .toDF().createOrReplaceTempView("t1")
+ spark.sparkContext.parallelize((1 to 10).map(i => TestData(i,
i.toString)), 4)
+ .toDF().createOrReplaceTempView("t2")
+
+ // positive test that could be coalesced
+ checkResultPartition(
+ sql("""
+ |SELECT key, count(*) FROM t1 GROUP BY key
+ |UNION ALL
+ |SELECT * FROM t2
+ """.stripMargin),
+ if (combineUnionEnabled) 1 else 1,
Review comment:
```suggestion
unionNumber = 1
```
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
##########
@@ -1705,6 +1705,91 @@ class AdaptiveQueryExecSuite
}
}
+ test("SPARK-34980: Support coalesce partition through union") {
+ def checkResultPartition(
+ df: Dataset[Row],
+ unionNumber: Int,
+ shuffleReaderNumber: Int,
+ partitionNumber: Int): Unit = {
+ df.collect()
+ assert(collect(df.queryExecution.executedPlan) {
+ case u: UnionExec => u
+ }.size == unionNumber)
+ assert(collect(df.queryExecution.executedPlan) {
+ case r: AQEShuffleReadExec => r
+ }.size === shuffleReaderNumber)
+ assert(df.rdd.partitions.length === partitionNumber)
+ }
+
+ Seq(true, false).foreach { combineUnionEnabled =>
+ val combineUnionConfig = if (combineUnionEnabled) {
+ "" -> ""
+ } else {
+ SQLConf.OPTIMIZER_EXCLUDED_RULES.key ->
+ "org.apache.spark.sql.catalyst.optimizer.CombineUnions"
Review comment:
Does this really matter for the "coalesce partitions through union" feature? I
think we can just test the default case, in which this rule is enabled.
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
##########
@@ -1705,6 +1705,91 @@ class AdaptiveQueryExecSuite
}
}
+ test("SPARK-34980: Support coalesce partition through union") {
+ def checkResultPartition(
+ df: Dataset[Row],
+ unionNumber: Int,
+ shuffleReaderNumber: Int,
+ partitionNumber: Int): Unit = {
+ df.collect()
+ assert(collect(df.queryExecution.executedPlan) {
+ case u: UnionExec => u
+ }.size == unionNumber)
+ assert(collect(df.queryExecution.executedPlan) {
+ case r: AQEShuffleReadExec => r
+ }.size === shuffleReaderNumber)
+ assert(df.rdd.partitions.length === partitionNumber)
+ }
+
+ Seq(true, false).foreach { combineUnionEnabled =>
+ val combineUnionConfig = if (combineUnionEnabled) {
+ "" -> ""
+ } else {
+ SQLConf.OPTIMIZER_EXCLUDED_RULES.key ->
+ "org.apache.spark.sql.catalyst.optimizer.CombineUnions"
+ }
+ // advisory partition size 1048576 has no special meaning, just a big
enough value
+ withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
+ SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
+ SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "1048576",
+ SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
+ SQLConf.SHUFFLE_PARTITIONS.key -> "10",
+ combineUnionConfig) {
+ withTempView("t1", "t2") {
+ spark.sparkContext.parallelize((1 to 10).map(i => TestData(i,
i.toString)), 2)
+ .toDF().createOrReplaceTempView("t1")
+ spark.sparkContext.parallelize((1 to 10).map(i => TestData(i,
i.toString)), 4)
+ .toDF().createOrReplaceTempView("t2")
+
+ // positive test that could be coalesced
+ checkResultPartition(
+ sql("""
+ |SELECT key, count(*) FROM t1 GROUP BY key
+ |UNION ALL
+ |SELECT * FROM t2
+ """.stripMargin),
+ if (combineUnionEnabled) 1 else 1,
+ 1,
+ 1 + 4)
+
+ checkResultPartition(
+ sql("""
+ |SELECT key, count(*) FROM t1 GROUP BY key
+ |UNION ALL
+ |SELECT * FROM t2
+ |UNION ALL
+ |SELECT * FROM t1
+ """.stripMargin),
+ if (combineUnionEnabled) 1 else 2,
+ 1,
+ 1 + 4 + 2)
+
+ checkResultPartition(
+ sql("""
+ |SELECT key, count(*) FROM t1 GROUP BY key
+ |UNION ALL
+ |SELECT * FROM t2
+ |UNION ALL
+ |SELECT * FROM t1
+ |UNION ALL
+ |SELECT key, count(*) FROM t2 GROUP BY key
Review comment:
it's not very useful to test 3 unions, as it's similar to the 2 cases
above.
Let's test SMJ UNION AGG
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]