cloud-fan commented on a change in pull request #24675: [SPARK-27803][SQL] fix
column pruning for python UDF
URL: https://github.com/apache/spark/pull/24675#discussion_r286376521
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/SparkOptimizer.scala
##########
@@ -32,14 +32,21 @@ class SparkOptimizer(
override def defaultBatches: Seq[Batch] = (preOptimizationBatches ++
super.defaultBatches :+
Batch("Optimize Metadata Only Query", Once,
OptimizeMetadataOnlyQuery(catalog)) :+
Batch("Extract Python UDFs", Once,
- Seq(ExtractPythonUDFFromAggregate, ExtractPythonUDFs): _*) :+
+ ExtractPythonUDFFromAggregate,
+ ExtractPythonUDFs,
+      // The eval-python node may be between Project/Filter and the scan node, which breaks
+      // column pruning and filter push-down. Here we rerun the related optimizer rules.
+ ColumnPruning,
+ PushDownPredicate,
+ RemoveNoopOperators) :+
Batch("Prune File Source Table Partitions", Once,
PruneFileSourcePartitions) :+
Batch("Schema Pruning", Once, SchemaPruning)) ++
postHocOptimizationBatches :+
Batch("User Provided Optimizers", fixedPoint,
experimentalMethods.extraOptimizations: _*)
- override def nonExcludableRules: Seq[String] =
- super.nonExcludableRules :+ ExtractPythonUDFFromAggregate.ruleName
+ override def nonExcludableRules: Seq[String] = super.nonExcludableRules :+
+ ExtractPythonUDFFromAggregate.ruleName :+
+ ExtractPythonUDFs.ruleName
Review comment:
it should be there. We can do it in another PR, but since I'm touching this
file, I just fixed it.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]