gengliangwang commented on a change in pull request #24043: [SPARK-11412][SQL]
Support merge schema for ORC
URL: https://github.com/apache/spark/pull/24043#discussion_r292475532
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcUtils.scala
##########
@@ -82,14 +84,36 @@ object OrcUtils extends Logging {
: Option[StructType] = {
val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles
val conf = sparkSession.sessionState.newHadoopConf()
- // TODO: We need to support merge schema. Please see SPARK-11412.
files.toIterator.map(file => readSchema(file.getPath, conf,
ignoreCorruptFiles)).collectFirst {
case Some(schema) =>
logDebug(s"Reading schema from file $files, got Hive schema string:
$schema")
CatalystSqlParser.parseDataType(schema.toString).asInstanceOf[StructType]
}
}
+ /**
+ * Reads ORC file schemas in multi-threaded manner, using native version of
ORC.
+ * This is visible for testing.
+ */
+ def readOrcSchemasInParallel(
+ files: Seq[FileStatus], conf: Configuration, ignoreCorruptFiles: Boolean):
Seq[StructType] = {
+ ThreadUtils.parmap(files, "readingOrcSchemas", 8) { currentFile =>
+ OrcUtils.readSchema(currentFile.getPath, conf, ignoreCorruptFiles)
+ .map(s =>
CatalystSqlParser.parseDataType(s.toString).asInstanceOf[StructType])
+ }.flatten
+ }
+
+ def inferSchema(sparkSession: SparkSession, files: Seq[FileStatus], options:
Map[String, String])
Review comment:
I think we can keep the name `readSchema` here. Instead, we can rename the
method that reads the schema from a single file.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]