MaxGekk commented on a change in pull request #27112: [SPARK-30428][SQL] File
source V2: support partition pruning
URL: https://github.com/apache/spark/pull/27112#discussion_r365539740
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PruneFileSourcePartitions.scala
##########
@@ -39,31 +72,35 @@ private[sql] object PruneFileSourcePartitions extends
Rule[LogicalPlan] {
_,
_))
if filters.nonEmpty && fsRelation.partitionSchemaOption.isDefined =>
- val normalizedFilters = DataSourceStrategy.normalizeExprs(
- filters.filterNot(SubqueryExpression.hasSubquery),
logicalRelation.output)
-
- val sparkSession = fsRelation.sparkSession
- val partitionColumns =
- logicalRelation.resolve(
- partitionSchema, sparkSession.sessionState.analyzer.resolver)
- val partitionSet = AttributeSet(partitionColumns)
- val partitionKeyFilters = ExpressionSet(normalizedFilters.filter { f =>
- f.references.subsetOf(partitionSet)
- })
-
+ val partitionKeyFilters = getPartitionKeyFilters(
+ fsRelation.sparkSession, logicalRelation, partitionSchema, filters,
logicalRelation.output)
if (partitionKeyFilters.nonEmpty) {
val prunedFileIndex =
catalogFileIndex.filterPartitions(partitionKeyFilters.toSeq)
val prunedFsRelation =
- fsRelation.copy(location = prunedFileIndex)(sparkSession)
+ fsRelation.copy(location = prunedFileIndex)(fsRelation.sparkSession)
// Change table stats based on the sizeInBytes of pruned files
val withStats = logicalRelation.catalogTable.map(_.copy(
stats = Some(CatalogStatistics(sizeInBytes =
BigInt(prunedFileIndex.sizeInBytes)))))
val prunedLogicalRelation = logicalRelation.copy(
relation = prunedFsRelation, catalogTable = withStats)
// Keep partition-pruning predicates so that they are visible in
physical planning
- val filterExpression = filters.reduceLeft(And)
- val filter = Filter(filterExpression, prunedLogicalRelation)
- Project(projects, filter)
+ rebuildPhysicalOperation(projects, filters, prunedLogicalRelation)
+ } else {
+ op
+ }
+
+ case op @ PhysicalOperation(projects, filters,
Review comment:
The CSV datasource in #26973 doesn't fall into this case, but parquet/orc does. And
`withPartitionFilters` is not invoked for CSV. What's wrong with CSV when
filter pushdown is enabled?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]