cloud-fan commented on a change in pull request #33650:
URL: https://github.com/apache/spark/pull/33650#discussion_r691291847
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileScanBuilder.scala
##########
@@ -17,18 +17,24 @@
package org.apache.spark.sql.execution.datasources.v2
import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.connector.read.{ScanBuilder, SupportsPushDownRequiredColumns}
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.connector.read.{ScanBuilder, SupportsPushDownFilters, SupportsPushDownRequiredColumns}
import org.apache.spark.sql.execution.datasources.{PartitioningAwareFileIndex, PartitioningUtils}
import org.apache.spark.sql.types.StructType
abstract class FileScanBuilder(
sparkSession: SparkSession,
fileIndex: PartitioningAwareFileIndex,
-  dataSchema: StructType) extends ScanBuilder with SupportsPushDownRequiredColumns {
+ dataSchema: StructType)
+ extends ScanBuilder
+ with SupportsPushDownRequiredColumns
+ with SupportsPushDownFilters {
private val partitionSchema = fileIndex.partitionSchema
  private val isCaseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
protected val supportsNestedSchemaPruning = false
  protected var requiredSchema = StructType(dataSchema.fields ++ partitionSchema.fields)
+ protected var partitionFilters = Seq.empty[Expression]
+ protected var dataFilters = Seq.empty[Expression]
Review comment:
why do we need the `dataFilters` here? I think we can get the filters
through `pushFilters`?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]