dbaliafroozeh commented on a change in pull request #28885:
URL: https://github.com/apache/spark/pull/28885#discussion_r447624697



##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/reuse/Reuse.scala
##########
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.reuse
+
+import scala.collection.mutable.Map
+
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.execution.{BaseSubqueryExec, ExecSubqueryExpression, ReusedSubqueryExec, SparkPlan}
+import org.apache.spark.sql.execution.exchange.{Exchange, ReusedExchangeExec}
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types.StructType
+
+/**
+ * Find out duplicated exchanges and subqueries in the whole Spark plan, including subqueries,
+ * then use the same exchange or subquery for all the references.
+ */
+case class ReuseExchangeAndSubquery(conf: SQLConf) extends Rule[SparkPlan] {
+
+  private class ReuseCache[T <: SparkPlan] {
+    // To avoid costly canonicalization of an exchange or a subquery:
+    // - we use its schema first to check if it can be replaced with a reused one at all
+    // - we insert it into the map of canonicalized plans only when at least 2 have the
+    //   same schema
+    private val cache = Map[StructType, (T, Map[SparkPlan, T])]()
+
+    def lookup(plan: T): T = {
+      val (firstSameSchemaPlan, sameResultPlans) =
+        cache.getOrElseUpdate(plan.schema, plan -> Map())
+      if (firstSameSchemaPlan.ne(plan)) {
+        if (sameResultPlans.isEmpty) {
+          sameResultPlans += firstSameSchemaPlan.canonicalized -> firstSameSchemaPlan
+        }
+        sameResultPlans.getOrElseUpdate(plan.canonicalized, plan)
+      } else {
+        plan
+      }
+    }
+  }
+
+  def apply(plan: SparkPlan): SparkPlan = {
+    if (conf.exchangeReuseEnabled || conf.subqueryReuseEnabled) {
+      val exchanges = new ReuseCache[Exchange]()
+      val subqueries = new ReuseCache[BaseSubqueryExec]()
+
+      def reuse(plan: SparkPlan): SparkPlan = plan.transformUp {
+        case exchange: Exchange if conf.exchangeReuseEnabled =>
+          val cached = exchanges.lookup(exchange)
+          if (cached.ne(exchange)) {
+            ReusedExchangeExec(exchange.output, cached)
+          } else {
+            exchange
+          }
+
+        case other => other.transformExpressionsUp {
+          case sub: ExecSubqueryExpression =>
+            val subquery = reuse(sub.plan).asInstanceOf[BaseSubqueryExec]
+            sub.withNewPlan(
+              if (conf.subqueryReuseEnabled) {

Review comment:
       `reuseOrElseAdd` is probably a better name, but this naming is getting hard :-) As for the new behavior, it sounds good to me, but if there is no performance benefit and it only comes up in the tests, I would have just wrapped the same instance in a reuse node, so we also don't need to bother with the second method and the equality check. I don't have a strong opinion on this, though.
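
       A hypothetical sketch of that alternative (not code from this PR), assuming `lookup` were renamed to `reuseOrElseAdd` and the call site always wrapped the result in a reuse node:

```scala
def reuse(plan: SparkPlan): SparkPlan = plan.transformUp {
  case exchange: Exchange if conf.exchangeReuseEnabled =>
    // Wrap unconditionally: when `reuseOrElseAdd` returns the first
    // occurrence, the child is the exchange itself, and we accept the
    // extra ReusedExchangeExec node instead of a `ne` check here.
    ReusedExchangeExec(exchange.output, exchanges.reuseOrElseAdd(exchange))
}
```

       The trade-off is one extra reuse node per exchange in the plan, in return for dropping the reference-equality check and the second code path.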



