This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 57bf23c  [SPARK-31037][SQL][FOLLOW-UP] Replace legacy ReduceNumShufflePartitions with CoalesceShufflePartitions in comment
57bf23c is described below

commit 57bf23c01b2cffe5011a9d15eb68eff5c28519f4
Author: yi.wu <yi...@databricks.com>
AuthorDate: Tue Mar 10 11:09:36 2020 -0700

    [SPARK-31037][SQL][FOLLOW-UP] Replace legacy ReduceNumShufflePartitions with CoalesceShufflePartitions in comment
    
    ### What changes were proposed in this pull request?
    
    Replace legacy `ReduceNumShufflePartitions` with `CoalesceShufflePartitions` in comment.
    
    ### Why are the changes needed?
    
    Rule `ReduceNumShufflePartitions` has been renamed to `CoalesceShufflePartitions`, so we should update the related comments as well.
    
    ### Does this PR introduce any user-facing change?
    
    No.
    
    ### How was this patch tested?
    
    N/A.
    
    Closes #27865 from Ngone51/spark_31037_followup.
    
    Authored-by: yi.wu <yi...@databricks.com>
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
    (cherry picked from commit 34be83e08b6f5313bdd9d165d3e203d06eff677b)
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
---
 .../apache/spark/sql/execution/adaptive/AdaptiveSparkPlanExec.scala | 6 +++---
 .../apache/spark/sql/execution/adaptive/OptimizeSkewedJoin.scala    | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/AdaptiveSparkPlanExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/AdaptiveSparkPlanExec.scala
index fc88a7f..c1486aa 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/AdaptiveSparkPlanExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/AdaptiveSparkPlanExec.scala
@@ -97,12 +97,12 @@ case class AdaptiveSparkPlanExec(
   @transient private val queryStageOptimizerRules: Seq[Rule[SparkPlan]] = Seq(
     ReuseAdaptiveSubquery(conf, context.subqueryCache),
     // Here the 'OptimizeSkewedJoin' rule should be executed
-    // before 'ReduceNumShufflePartitions', as the skewed partition handled
-    // in 'OptimizeSkewedJoin' rule, should be omitted in 'ReduceNumShufflePartitions'.
+    // before 'CoalesceShufflePartitions', as the skewed partition handled
+    // in 'OptimizeSkewedJoin' rule, should be omitted in 'CoalesceShufflePartitions'.
     OptimizeSkewedJoin(conf),
     CoalesceShufflePartitions(conf),
     // The rule of 'OptimizeLocalShuffleReader' need to make use of the 'partitionStartIndices'
-    // in 'ReduceNumShufflePartitions' rule. So it must be after 'ReduceNumShufflePartitions' rule.
+    // in 'CoalesceShufflePartitions' rule. So it must be after 'CoalesceShufflePartitions' rule.
     OptimizeLocalShuffleReader(conf),
     ApplyColumnarRulesAndInsertTransitions(conf, context.session.sessionState.columnarRules),
     CollapseCodegenStages(conf)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/OptimizeSkewedJoin.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/OptimizeSkewedJoin.scala
index c3bcce4..4387409 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/OptimizeSkewedJoin.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/OptimizeSkewedJoin.scala
@@ -52,7 +52,7 @@ import org.apache.spark.sql.internal.SQLConf
  * (L4-1, R4-1), (L4-2, R4-1), (L4-1, R4-2), (L4-2, R4-2)
  *
  * Note that, when this rule is enabled, it also coalesces non-skewed partitions like
- * `ReduceNumShufflePartitions` does.
+ * `CoalesceShufflePartitions` does.
  */
 case class OptimizeSkewedJoin(conf: SQLConf) extends Rule[SparkPlan] {
 
@@ -191,7 +191,7 @@ case class OptimizeSkewedJoin(conf: SQLConf) extends Rule[SparkPlan] {
       val leftSidePartitions = mutable.ArrayBuffer.empty[ShufflePartitionSpec]
       val rightSidePartitions = mutable.ArrayBuffer.empty[ShufflePartitionSpec]
       // This is used to delay the creation of non-skew partitions so that we can potentially
-      // coalesce them like `ReduceNumShufflePartitions` does.
+      // coalesce them like `CoalesceShufflePartitions` does.
       val nonSkewPartitionIndices = mutable.ArrayBuffer.empty[Int]
       val leftSkewDesc = new SkewDesc
       val rightSkewDesc = new SkewDesc
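
For reference, the rules named in the updated comments only run when adaptive query execution is enabled. Below is a minimal, illustrative sketch (not part of this commit) of session settings that exercise CoalesceShufflePartitions and OptimizeSkewedJoin at runtime, assuming the Spark 3.0 AQE config key names; the table names t1 and t2 are hypothetical placeholders.

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .appName("aqe-coalesce-skew-sketch")
  .master("local[*]")
  // Turn on adaptive query execution so the queryStageOptimizerRules above are applied.
  .config("spark.sql.adaptive.enabled", "true")
  // CoalesceShufflePartitions (formerly ReduceNumShufflePartitions) merges small
  // post-shuffle partitions when this flag is enabled.
  .config("spark.sql.adaptive.coalescePartitions.enabled", "true")
  // OptimizeSkewedJoin splits skewed partitions; per the comment above, it runs
  // before the coalescing rule so the handled skewed partitions are omitted there.
  .config("spark.sql.adaptive.skewJoin.enabled", "true")
  .getOrCreate()

// A shuffle-heavy join: with AQE enabled, the final post-shuffle partition layout
// is decided at runtime by the rule sequence in AdaptiveSparkPlanExec.
val joined = spark.table("t1").join(spark.table("t2"), "id")
joined.explain()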


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
