cloud-fan commented on a change in pull request #34984:
URL: https://github.com/apache/spark/pull/34984#discussion_r775761535
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcUtils.scala
##########
@@ -279,22 +277,40 @@ object OrcUtils extends Logging {
}
/**
- * Given a `StructType` object, this methods converts it to corresponding
string representation
- * in ORC.
+ * Given two `StructType` objects, this method converts them to the
corresponding string representation
+ * in ORC. The second `StructType` is used to change `TimestampNTZType` to
LongType in the result
+ * schema string when reading `TimestampNTZ` as `TimestampLTZ`.
*/
- def orcTypeDescriptionString(dt: DataType): String = dt match {
- case s: StructType =>
+ def orcTypeDescriptionString(
+ dt: DataType, orcDt: Option[DataType] = None): String = (dt, orcDt)
match {
+ case (s1: StructType, Some(s2: StructType)) =>
+ val fieldTypes = s1.fields.map { f =>
+ val idx = s2.fieldNames.indexWhere(caseSensitiveResolution(_, f.name))
+ if (idx == -1) {
+ s"${quoteIdentifier(f.name)}:${orcTypeDescriptionString(f.dataType)}"
+ } else {
+ s"${quoteIdentifier(f.name)}:" +
+ s"${orcTypeDescriptionString(f.dataType, Some(s2(idx).dataType))}"
+ }
+ }
+ s"struct<${fieldTypes.mkString(",")}>"
+ case (s: StructType, None) =>
val fieldTypes = s.fields.map { f =>
s"${quoteIdentifier(f.name)}:${orcTypeDescriptionString(f.dataType)}"
}
s"struct<${fieldTypes.mkString(",")}>"
- case a: ArrayType =>
+ case (a1: ArrayType, Some(a2: ArrayType)) =>
+ s"array<${orcTypeDescriptionString(a1.elementType,
Some(a2.elementType))}>"
+ case (a: ArrayType, None) =>
s"array<${orcTypeDescriptionString(a.elementType)}>"
- case m: MapType =>
+ case (m1: MapType, Some(m2: MapType)) =>
+ s"map<${orcTypeDescriptionString(m1.keyType, Some(m2.keyType))}," +
+ s"${orcTypeDescriptionString(m1.valueType, Some(m2.valueType))}>"
+ case (m: MapType, None) =>
s"map<${orcTypeDescriptionString(m.keyType)},${orcTypeDescriptionString(m.valueType)}>"
- case TimestampNTZType => TypeDescription.Category.TIMESTAMP.getName
- case _: DayTimeIntervalType => LongType.catalogString
- case _: YearMonthIntervalType => IntegerType.catalogString
+ case (_: DayTimeIntervalType | _: TimestampNTZType, _) =>
LongType.catalogString
+ case (_: YearMonthIntervalType, _) => IntegerType.catalogString
+ case (TimestampType, Some(TimestampNTZType)) => LongType.catalogString
Review comment:
When we read an ORC int32 as a Spark long type, do we need to correct the
type string as well?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]