dongjoon-hyun commented on a change in pull request #27888: [SPARK-31116][SQL]
Fix nested schema case-sensitivity in ParquetRowConverter
URL: https://github.com/apache/spark/pull/27888#discussion_r392835102
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/FileBasedDataSourceSuite.scala
##########
@@ -842,6 +842,41 @@ class FileBasedDataSourceSuite extends QueryTest
}
}
}
+
+ test("SPARK-31116: Select nested parquet with case insensitive mode") {
+ Seq("true", "false").foreach { nestedSchemaPruningEnabled =>
+ withSQLConf(
+ SQLConf.CASE_SENSITIVE.key -> "false",
+ SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> nestedSchemaPruningEnabled) {
+ withTempPath { dir =>
+ val path = dir.getCanonicalPath
+
+ // Prepare values for testing nested parquet data
+ spark
+ .range(1L)
+ .selectExpr("NAMED_STRUCT('lowercase', id, 'camelCase', id + 1) AS StructColumn")
+ .write.parquet(path)
+
+ val exactSchema = "StructColumn struct<lowercase: LONG, camelCase: LONG>"
+
+ checkAnswer(spark.read.schema(exactSchema).parquet(path), Row(Row(0, 1)))
+
+ // In case insensitive manner, parquet's column cases are ignored
+ val innerColumnCaseInsensitiveSchema =
+ "StructColumn struct<Lowercase: LONG, camelcase: LONG>"
+ checkAnswer(
+ spark.read.schema(innerColumnCaseInsensitiveSchema).parquet(path),
+ Row(Row(0, 1)))
+
+ val rootColumnCaseInsensitiveSchema =
+ "structColumn struct<lowercase: LONG, camelCase: LONG>"
+ checkAnswer(
+ spark.read.schema(rootColumnCaseInsensitiveSchema).parquet(path),
+ Row(Row(0, 1)))
+ }
+ }
+ }
+ }
Review comment:
@kimtkyeom. I revised your test code. Could you rewrite it like the following,
please?
- Generalize test case name
- Generalize test code by using `format`.
- Generalize test code by adding `orc` together. (This provides test coverage
parity, which is the goal of this test suite.)
```scala
test("SPARK-31116: Select nested schema with case insensitive mode") {
// This test case failed only for Parquet. ORC is added for test coverage
parity.
Seq("orc", "parquet").foreach { format =>
Seq("true", "false").foreach { nestedSchemaPruningEnabled =>
withSQLConf(
SQLConf.CASE_SENSITIVE.key -> "false",
SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> nestedSchemaPruningEnabled) {
withTempPath { dir =>
val path = dir.getCanonicalPath
// Prepare values for testing nested parquet data
spark
.range(1L)
.selectExpr("NAMED_STRUCT('lowercase', id, 'camelCase', id + 1) AS StructColumn")
.write
.format(format)
.save(path)
val exactSchema = "StructColumn struct<lowercase: LONG, camelCase: LONG>"
checkAnswer(spark.read.schema(exactSchema).format(format).load(path),
Row(Row(0, 1)))
// In case insensitive manner, parquet's column cases are ignored
val innerColumnCaseInsensitiveSchema =
"StructColumn struct<Lowercase: LONG, camelcase: LONG>"
checkAnswer(
spark.read.schema(innerColumnCaseInsensitiveSchema).format(format).load(path),
Row(Row(0, 1)))
val rootColumnCaseInsensitiveSchema =
"structColumn struct<lowercase: LONG, camelCase: LONG>"
checkAnswer(
spark.read.schema(rootColumnCaseInsensitiveSchema).format(format).load(path),
Row(Row(0, 1)))
}
}
}
}
}
```
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]