dongjoon-hyun commented on a change in pull request #31848:
URL: https://github.com/apache/spark/pull/31848#discussion_r597136204
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileScan.scala
##########
@@ -84,11 +85,25 @@ trait FileScan extends Scan
protected def seqToString(seq: Seq[Any]): String = seq.mkString("[", ", ",
"]")
+ private lazy val (normalizedPartitionFilters, normalizedDataFilters) = {
+ val output = readSchema().toAttributes.map(a =>
a.withName(normalizeName(a.name)))
+ val partitionFilterAttributes =
+ AttributeSet(partitionFilters).map(a => normalizeName(a.name) -> a).toMap
+ val dataFiltersAttributes = AttributeSet(dataFilters).map(a =>
normalizeName(a.name) -> a).toMap
+ val normalizedPartitionFilters = ExpressionSet(partitionFilters.map(
+ QueryPlan.normalizeExpressions(_,
+ output.map(a => partitionFilterAttributes.getOrElse(a.name, a)))))
+ val normalizedDataFilters = ExpressionSet(dataFilters.map(
+ QueryPlan.normalizeExpressions(_,
+ output.map(a => dataFiltersAttributes.getOrElse(a.name, a)))))
+ (normalizedPartitionFilters, normalizedDataFilters)
+ }
+
override def equals(obj: Any): Boolean = obj match {
Review comment:
Shall we do that separately, since it's unrelated to the correctness
issue?
In general, we expect a performance improvement with that, don't we?
In general, Apache Spark doesn't allow backporting performance improvements.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]