Github user rdblue commented on a diff in the pull request:
https://github.com/apache/spark/pull/19424#discussion_r143597669
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/PushDownOperatorsToDataSource.scala ---
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.datasources.v2
+
+import org.apache.spark.sql.catalyst.expressions.{And, Attribute, AttributeMap, Expression, PredicateHelper}
+import org.apache.spark.sql.catalyst.optimizer.{PushDownPredicate, RemoveRedundantProject}
+import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, Project}
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.execution.datasources.DataSourceStrategy
+import org.apache.spark.sql.sources
+import org.apache.spark.sql.sources.v2.reader._
+
+/**
+ * Pushes down various operators to the underlying data source for better performance. Operators
+ * are pushed down in a specific order. For example, given a LIMIT whose child is a FILTER, the
+ * LIMIT cannot be pushed down unless the FILTER is completely pushed down; when both are pushed
+ * down, the data source should execute the FILTER before the LIMIT. Required columns are
+ * calculated at the end, because the more operators are pushed down, the fewer columns Spark
+ * needs on its side.
+ */
+object PushDownOperatorsToDataSource extends Rule[LogicalPlan] with PredicateHelper {
+  override def apply(plan: LogicalPlan): LogicalPlan = {
+    // Make sure filters are at the very bottom of the plan.
+    val prepared = PushDownPredicate(plan)
+    val afterPushDown = prepared transformUp {
+      case Filter(condition, r @ DataSourceV2Relation(_, reader)) =>
+        val (candidates, containingNonDeterministic) =
+          splitConjunctivePredicates(condition).span(_.deterministic)
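+        // (Illustration, not part of the original patch.) For a condition like
+        //   a > 1 AND b = 2 AND rand() < 0.5 AND c > 0
+        // candidates is Seq(a > 1, b = 2) and containingNonDeterministic is
+        // Seq(rand() < 0.5, c > 0): `span` keeps every predicate after the first
+        // non-deterministic one in Spark, since reordering a predicate around
+        // rand() could change the result.
+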
+        val stayUpFilters: Seq[Expression] = reader match {
+          case r: SupportsPushDownCatalystFilters =>
+            r.pushCatalystFilters(candidates.toArray)
+
+          case r: SupportsPushDownFilters =>
+            // A map from original Catalyst expressions to corresponding translated data source
+            // filters. If a predicate is not in this map, it means it cannot be pushed down.
+            val translatedMap: Map[Expression, sources.Filter] = candidates.flatMap { p =>
+              DataSourceStrategy.translateFilter(p).map(f => p -> f)
+            }.toMap
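+            // (Illustration, not part of the original patch.) translateFilter maps e.g.
+            // `a > 1` to sources.GreaterThan("a", 1), while a predicate wrapping a Scala
+            // UDF has no source-level equivalent, so it is absent from the map and stays up.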
+
+            // Catalyst predicate expressions that cannot be converted to data source filters.
+            val nonConvertiblePredicates = candidates.filterNot(translatedMap.contains)
+
+            // Data source filters that cannot be pushed down. An unhandled filter means
+            // the data source cannot guarantee the rows returned can pass the filter.
+            // As a result we must return it so Spark can plan an extra filter operator.
+            val unhandledFilters = r.pushFilters(translatedMap.values.toArray).toSet
+            val unhandledPredicates = translatedMap.filter { case (_, f) =>
+              unhandledFilters.contains(f)
+            }.keys
+
+            nonConvertiblePredicates ++ unhandledPredicates
+
+          case _ => candidates
+        }
+
+        val filterCondition = (stayUpFilters ++ containingNonDeterministic).reduceLeftOption(And)
+        filterCondition.map(Filter(_, r)).getOrElse(r)
+
+      // TODO: add more push down rules.
+    }
+
+    // TODO: nested fields pruning
+    def pushDownRequiredColumns(plan: LogicalPlan, requiredByParent: Seq[Attribute]): Unit = {
+      plan match {
+        case Project(projectList, child) =>
+          val required = projectList.filter(requiredByParent.contains).flatMap(_.references)
+          pushDownRequiredColumns(child, required)
+
+        case Filter(condition, child) =>
+          val required = requiredByParent ++ condition.references
+          pushDownRequiredColumns(child, required)
+
+        case DataSourceV2Relation(fullOutput, reader) => reader match {
+          case r: SupportsPushDownRequiredColumns =>
+            val attrMap = AttributeMap(fullOutput.zip(fullOutput))
+            val requiredColumns = requiredByParent.map(attrMap)
+            // Match the original case of attributes.
+            r.pruneColumns(requiredColumns.toStructType)
+          case _ =>
+        }
+
+        case _ => plan.children.foreach(child => pushDownRequiredColumns(child, child.output))
--- End diff ---
How do we know that there aren't more cases that need to be supported?
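
For illustration (a hypothetical plan shape, not from the patch): an aggregate sitting directly on top of the relation, e.g.

    Aggregate(
      groupingExpressions  = Seq('a),
      aggregateExpressions = Seq('a, sum('b).as("s")),
      child                = DataSourceV2Relation(Seq('a, 'b, 'c), reader))

matches none of the Project / Filter / DataSourceV2Relation cases, so it falls into `case _` and the recursion restarts with `child.output`: the source is asked for all of a, b and c even though c is never referenced. That is safe but conservative, and it presumably relies on earlier optimizer rules like ColumnPruning having inserted a Project below such operators.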
---