sarutak commented on a change in pull request #33915:
URL: https://github.com/apache/spark/pull/33915#discussion_r709890456
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcUtils.scala
##########
@@ -85,10 +85,42 @@ object OrcUtils extends Logging {
}
private def toCatalystSchema(schema: TypeDescription): StructType = {
+ import TypeDescription.Category
+
+ def toCatalystType(orcType: TypeDescription): DataType = {
+ orcType.getCategory match {
+ case Category.STRUCT => toStructType(orcType)
+ case Category.LIST => toArrayType(orcType)
+ case Category.MAP => toMapType(orcType)
+ case _ => CatalystSqlParser.parseDataType(orcType.toString)
+ }
+ }
+
+ def toStructType(orcType: TypeDescription): StructType = {
+ val fieldNames = orcType.getFieldNames.asScala
+ val fieldTypes = orcType.getChildren.asScala
+ fieldNames.zip(fieldTypes).foldLeft(new StructType) {
+ case (resultType, (fieldName, fieldType)) =>
+ val catalystType = toCatalystType(fieldType)
+ resultType.add(StructField(fieldName, catalystType))
Review comment:
All the types in ORC seem to have nullability.
https://orc.apache.org/docs/types.html
Schema mapping from Catalyst to ORC seems to be done using
`OrcFileFormat.getQuotedSchemaString`, and it considers only `name` and
`dataType`, so I think we don't need to care about metadata here.
https://github.com/apache/spark/blob/ff7705ad2ad5f4b9dfbeda83e93a0db676e1ffd9/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFileFormat.scala#L47-L59
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcUtils.scala
##########
@@ -85,10 +85,42 @@ object OrcUtils extends Logging {
}
private def toCatalystSchema(schema: TypeDescription): StructType = {
+ import TypeDescription.Category
+
+ def toCatalystType(orcType: TypeDescription): DataType = {
+ orcType.getCategory match {
+ case Category.STRUCT => toStructType(orcType)
+ case Category.LIST => toArrayType(orcType)
+ case Category.MAP => toMapType(orcType)
+ case _ => CatalystSqlParser.parseDataType(orcType.toString)
+ }
+ }
+
+ def toStructType(orcType: TypeDescription): StructType = {
+ val fieldNames = orcType.getFieldNames.asScala
+ val fieldTypes = orcType.getChildren.asScala
+ fieldNames.zip(fieldTypes).foldLeft(new StructType) {
Review comment:
Ah, yes, it's not efficient. Thanks.
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcUtils.scala
##########
@@ -85,10 +85,42 @@ object OrcUtils extends Logging {
}
private def toCatalystSchema(schema: TypeDescription): StructType = {
+ import TypeDescription.Category
+
+ def toCatalystType(orcType: TypeDescription): DataType = {
+ orcType.getCategory match {
+ case Category.STRUCT => toStructType(orcType)
+ case Category.LIST => toArrayType(orcType)
+ case Category.MAP => toMapType(orcType)
+ case _ => CatalystSqlParser.parseDataType(orcType.toString)
+ }
+ }
+
+ def toStructType(orcType: TypeDescription): StructType = {
+ val fieldNames = orcType.getFieldNames.asScala
+ val fieldTypes = orcType.getChildren.asScala
+ fieldNames.zip(fieldTypes).foldLeft(new StructType) {
+ case (resultType, (fieldName, fieldType)) =>
+ val catalystType = toCatalystType(fieldType)
+ resultType.add(StructField(fieldName, catalystType))
+ }
+ }
+
+ def toArrayType(orcType: TypeDescription): ArrayType = {
+ val elementType = orcType.getChildren.asScala.head
Review comment:
Unfortunately, we cannot do it like that because `orcType.getChildren`
returns a `java.util.List`.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]