Github user gatorsmile commented on a diff in the pull request:
https://github.com/apache/spark/pull/21320#discussion_r199389252
--- Diff:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadSupport.scala
---
@@ -71,9 +80,22 @@ private[parquet] class ParquetReadSupport(val convertTz: Option[TimeZone])
StructType.fromString(schemaString)
}
- val parquetRequestedSchema =
+ val clippedParquetSchema =
ParquetReadSupport.clipParquetSchema(context.getFileSchema, catalystRequestedSchema)
+ val parquetRequestedSchema = if (parquetMrCompatibility) {
+ // Parquet-mr will throw an exception if we try to read a superset of the file's schema.
+ // Therefore, we intersect our clipped schema with the underlying file's schema
+ ParquetReadSupport.intersectParquetGroups(clippedParquetSchema, context.getFileSchema)
+ .map(intersectionGroup =>
+ new MessageType(intersectionGroup.getName, intersectionGroup.getFields))
+ .getOrElse(ParquetSchemaConverter.EMPTY_MESSAGE)
+ } else {
+ // Spark's built-in Parquet reader will throw an exception in some cases if the requested
+ // schema is not the same as the clipped schema
--- End diff --
cc @rdblue @mswit-databricks Do you know the root cause?
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]