cloud-fan commented on a change in pull request #26516: [SPARK-29893] improve
the local shuffle reader performance by changing the reading task number from 1
to multi.
URL: https://github.com/apache/spark/pull/26516#discussion_r347770880
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/OptimizeLocalShuffleReader.scala
##########
@@ -27,48 +27,47 @@ import
org.apache.spark.sql.execution.exchange.{EnsureRequirements, ShuffleExcha
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, BuildLeft,
BuildRight, BuildSide}
import org.apache.spark.sql.internal.SQLConf
-object BroadcastJoinWithShuffleLeft {
- def unapply(plan: SparkPlan): Option[(QueryStageExec, BuildSide)] = plan
match {
- case join: BroadcastHashJoinExec if
ShuffleQueryStageExec.isShuffleQueryStageExec(join.left) =>
- Some((join.left.asInstanceOf[QueryStageExec], join.buildSide))
- case _ => None
- }
-}
-
-object BroadcastJoinWithShuffleRight {
- def unapply(plan: SparkPlan): Option[(QueryStageExec, BuildSide)] = plan
match {
- case join: BroadcastHashJoinExec if
ShuffleQueryStageExec.isShuffleQueryStageExec(join.right) =>
- Some((join.right.asInstanceOf[QueryStageExec], join.buildSide))
- case _ => None
- }
-}
-
/**
- * A rule to optimize the shuffle reader to local reader as far as possible
- * when converting the 'SortMergeJoinExec' to 'BroadcastHashJoinExec' in
runtime.
- *
- * This rule can be divided into two steps:
- * Step1: Add the local reader in probe side and then check whether additional
- * shuffle introduced. If introduced, we will revert all the local
- * reader in probe side.
- * Step2: Add the local reader in build side and will not check whether
- * additional shuffle introduced. Because the build side will not
introduce
- * additional shuffle.
+ * A rule to optimize the shuffle reader to local reader iff no additional
shuffles
+ * will be introduced:
+ * 1. if the input plan is a shuffle, add local reader directly as we can
never introduce
+ * extra shuffles in this case.
+ * 2. otherwise, add local reader to the probe side of broadcast hash join and
+ * then run `EnsureRequirements` to check whether an additional shuffle is
introduced.
+ * If one is introduced, we will revert all the local readers.
*/
case class OptimizeLocalShuffleReader(conf: SQLConf) extends Rule[SparkPlan] {
+ def withProbeSideLocalReader(plan: SparkPlan): SparkPlan = {
+ plan.transformDown {
+ case join @ OptimizeLocalShuffleReader.BroadcastJoinWithShuffleLeft(
+ shuffleStage, BuildRight) =>
+ val localReader = createLocalReader(shuffleStage)
+ join.asInstanceOf[BroadcastHashJoinExec].copy(left = localReader)
+ case join @ OptimizeLocalShuffleReader.BroadcastJoinWithShuffleRight(
+ shuffleStage, BuildLeft) =>
+ val localReader = createLocalReader(shuffleStage)
+ join.asInstanceOf[BroadcastHashJoinExec].copy(right = localReader)
+ }
+ }
+
+ def createLocalReader(plan: SparkPlan): LocalShuffleReaderExec = {
+ plan match {
+ case c: CoalescedShuffleReaderExec =>
+ LocalShuffleReaderExec(c.child, Some(c.partitionStartIndices.length))
+ case q: QueryStageExec => LocalShuffleReaderExec(q)
+ }
+ }
+
override def apply(plan: SparkPlan): SparkPlan = {
if (!conf.getConf(SQLConf.OPTIMIZE_LOCAL_SHUFFLE_READER_ENABLED)) {
return plan
}
- // Add local reader in probe side.
- val withProbeSideLocalReader = plan.transformDown {
- case join @ BroadcastJoinWithShuffleLeft(shuffleStage, BuildRight) =>
- val localReader = LocalShuffleReaderExec(shuffleStage)
- join.asInstanceOf[BroadcastHashJoinExec].copy(left = localReader)
- case join @ BroadcastJoinWithShuffleRight(shuffleStage, BuildLeft) =>
- val localReader = LocalShuffleReaderExec(shuffleStage)
- join.asInstanceOf[BroadcastHashJoinExec].copy(right = localReader)
+
+ val optimizedPlan = plan match {
+ case s: SparkPlan if
OptimizeLocalShuffleReader.canUseLocalShuffleReader(s) =>
Review comment:
we can `import OptimizeLocalShuffleReader._` at the beginning of `case class
OptimizeLocalShuffleReader`, to avoid repeating the `OptimizeLocalShuffleReader.` prefix.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]