holdenk commented on a change in pull request #26005: [SPARK-29163][SQL] Simplify Hadoop Configuration access in DataSourcev2
URL: https://github.com/apache/spark/pull/26005#discussion_r332638462
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/orc/OrcScan.scala
##########
@@ -16,33 +16,50 @@
  */
 package org.apache.spark.sql.execution.datasources.v2.orc
 
+import scala.collection.JavaConverters._
+
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
+import org.apache.orc.mapreduce.OrcInputFormat
 
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.connector.read.PartitionReaderFactory
 import org.apache.spark.sql.execution.datasources.PartitioningAwareFileIndex
-import org.apache.spark.sql.execution.datasources.v2.FileScan
+import org.apache.spark.sql.execution.datasources.orc.OrcFilters
+import org.apache.spark.sql.execution.datasources.v2.{BroadcastedHadoopConf, FileScan}
 import org.apache.spark.sql.sources.Filter
 import org.apache.spark.sql.types.StructType
 import org.apache.spark.sql.util.CaseInsensitiveStringMap
-import org.apache.spark.util.SerializableConfiguration
 
 case class OrcScan(
     sparkSession: SparkSession,
-    hadoopConf: Configuration,
     fileIndex: PartitioningAwareFileIndex,
+    schema: StructType,
     dataSchema: StructType,
     readDataSchema: StructType,
     readPartitionSchema: StructType,
     options: CaseInsensitiveStringMap,
     pushedFilters: Array[Filter])
-  extends FileScan(sparkSession, fileIndex, readDataSchema, readPartitionSchema) {
+  extends FileScan(sparkSession, fileIndex, readDataSchema, readPartitionSchema)
+  with BroadcastedHadoopConf {
+
   override def isSplitable(path: Path): Boolean = true
 
+  override def hadoopConf(): Configuration = {
+    if (cachedHadoopConf eq null) {
+      val caseSensitiveMap = options.asCaseSensitiveMap.asScala.toMap
+      // Hadoop Configurations are case sensitive.
+      cachedHadoopConf = sparkSession.sessionState.newHadoopConfWithOptions(caseSensitiveMap)
+      // The pushed filters will be set in `hadoopConf`. After that, we can simply use the
+      // changed `hadoopConf` in executors.
+      OrcFilters.createFilter(schema, pushedFilters).foreach { f =>
+        OrcInputFormat.setSearchArgument(hadoopConf, f, dataSchema.fieldNames)
+      }
Review comment:
I did that initially; I'm happy to switch back to it.
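
[Editor's note] The hunk above mixes in `BroadcastedHadoopConf`, whose definition is not part of this hunk. Below is a minimal sketch of what such a mix-in might look like; it is a hypothetical reconstruction, not the trait from the PR, and it assumes the trait lives in Spark's own source tree so it can reach the `private[spark]` `SerializableConfiguration`:

import org.apache.hadoop.conf.Configuration

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.SparkSession
import org.apache.spark.util.SerializableConfiguration

// Hypothetical sketch only: the real trait body is not shown in this hunk.
trait BroadcastedHadoopConf {
  // Implementors such as OrcScan already expose a SparkSession.
  def sparkSession: SparkSession

  // Filled in lazily by hadoopConf() implementations, as in the diff above.
  protected var cachedHadoopConf: Configuration = null

  def hadoopConf(): Configuration

  // Broadcast the fully prepared configuration once, so executors read the
  // same conf, including any pushed-down ORC search arguments.
  lazy val broadcastedConf: Broadcast[SerializableConfiguration] =
    sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf()))
}

Under that shape, `createReaderFactory()` implementations could hand `broadcastedConf` to their reader factories instead of each file format broadcasting its own `SerializableConfiguration` by hand.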