cloud-fan commented on a change in pull request #26516: [SPARK-29893] improve the local shuffle reader performance by changing the reading task number from 1 to multi.
URL: https://github.com/apache/spark/pull/26516#discussion_r347761879
 
 

 ##########
 File path: sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/OptimizeLocalShuffleReader.scala
 ##########
 @@ -27,78 +27,90 @@ import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ShuffleExcha
 import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, BuildLeft, BuildRight, BuildSide}
 import org.apache.spark.sql.internal.SQLConf
 
+
 object BroadcastJoinWithShuffleLeft {
-  def unapply(plan: SparkPlan): Option[(QueryStageExec, BuildSide)] = plan match {
-    case join: BroadcastHashJoinExec if ShuffleQueryStageExec.isShuffleQueryStageExec(join.left) =>
-      Some((join.left.asInstanceOf[QueryStageExec], join.buildSide))
+  def unapply(plan: SparkPlan): Option[(SparkPlan, BuildSide)] = plan match {
+    case join: BroadcastHashJoinExec if OptimizeLocalShuffleReader.
+      canUseLocalShuffleReader(join.left) =>
+      Some((join.left, join.buildSide))
     case _ => None
   }
 }
 
 object BroadcastJoinWithShuffleRight {
-  def unapply(plan: SparkPlan): Option[(QueryStageExec, BuildSide)] = plan match {
-    case join: BroadcastHashJoinExec if ShuffleQueryStageExec.isShuffleQueryStageExec(join.right) =>
-      Some((join.right.asInstanceOf[QueryStageExec], join.buildSide))
+  def unapply(plan: SparkPlan): Option[(SparkPlan, BuildSide)] = plan match {
+    case join: BroadcastHashJoinExec if OptimizeLocalShuffleReader.
+      canUseLocalShuffleReader(join.right) =>
+      Some((join.right, join.buildSide))
     case _ => None
   }
 }
 
 /**
- * A rule to optimize the shuffle reader to local reader as far as possible
- * when converting the 'SortMergeJoinExec' to 'BroadcastHashJoinExec' in runtime.
- *
- * This rule can be divided into two steps:
- * Step1: Add the local reader in probe side and then check whether additional
- *       shuffle introduced. If introduced, we will revert all the local
- *       reader in probe side.
- * Step2: Add the local reader in build side and will not check whether
- *        additional shuffle introduced. Because the build side will not introduce
- *        additional shuffle.
+ * A rule to optimize the shuffle reader to local reader iff no additional shuffles
+ * will be introduced:
+ * 1. if the input plan is a shuffle, add local reader directly as we can never introduce
+ *    extra shuffles in this case.
+ * 2. otherwise, add local reader to the probe side of broadcast hash join and
+ *   then run `EnsureRequirements` to check whether additional shuffle introduced.
+ *   If introduced, we will revert all the local readers.
  */
 case class OptimizeLocalShuffleReader(conf: SQLConf) extends Rule[SparkPlan] {
 
-  override def apply(plan: SparkPlan): SparkPlan = {
-    if (!conf.getConf(SQLConf.OPTIMIZE_LOCAL_SHUFFLE_READER_ENABLED)) {
-      return plan
-    }
-    // Add local reader in probe side.
-    val withProbeSideLocalReader = plan.transformDown {
+  def withProbeSideLocalReader(plan: SparkPlan): SparkPlan = {
+    plan.transformDown {
       case join @ BroadcastJoinWithShuffleLeft(shuffleStage, BuildRight) =>
-        val localReader = LocalShuffleReaderExec(shuffleStage)
+        val localReader = withLocalReader(shuffleStage)
         join.asInstanceOf[BroadcastHashJoinExec].copy(left = localReader)
       case join @ BroadcastJoinWithShuffleRight(shuffleStage, BuildLeft) =>
-        val localReader = LocalShuffleReaderExec(shuffleStage)
+        val localReader = withLocalReader(shuffleStage)
         join.asInstanceOf[BroadcastHashJoinExec].copy(right = localReader)
     }
+  }
+
+  def withLocalReader(plan: SparkPlan): LocalShuffleReaderExec = {
 
 Review comment:
   nit: `createLocalReader`
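   
   For context, a minimal sketch of what the suggested rename could look like. The helper's body is not visible in this hunk, so the match below is only an assumption based on the old call sites (`LocalShuffleReaderExec(shuffleStage)`), not the PR's actual implementation, and it relies on the types already imported in this file:
   
   ```scala
   // Hypothetical sketch only: assumes the caller has already checked
   // canUseLocalShuffleReader, so the plan is a query stage that can be wrapped.
   def createLocalReader(plan: SparkPlan): LocalShuffleReaderExec = plan match {
     case stage: QueryStageExec => LocalShuffleReaderExec(stage)
     case other => throw new IllegalStateException(s"unexpected plan: $other")
   }
   ```
   
   The call sites in `withProbeSideLocalReader` would then read `val localReader = createLocalReader(shuffleStage)`.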
