dongjoon-hyun commented on a change in pull request #27888: [SPARK-31116][SQL] 
Fix nested schema case-sensitivity in ParquetRowConverter
URL: https://github.com/apache/spark/pull/27888#discussion_r392549435
 
 

 ##########
 File path: 
sql/core/src/test/scala/org/apache/spark/sql/FileBasedDataSourceSuite.scala
 ##########
 @@ -842,6 +842,333 @@ class FileBasedDataSourceSuite extends QueryTest
       }
     }
   }
+
+  test("SPARK-31116: Select simple parquet with case insensitive and schema 
pruning enabled") {
+    withSQLConf(
+      SQLConf.CASE_SENSITIVE.key -> "false",
+      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "true") {
+      withTempPath { dir => {
+        val path = dir.getCanonicalPath
+
+        // Prepare values for testing specific parquet record reader
+        Seq("A").toDF("camelCase").write.parquet(path)
+
+        val exactSchema = new StructType().add("camelCase", StringType)
+        checkAnswer(spark.read.schema(exactSchema).parquet(path), Row("A"))
+
+        // In case insensitive manner, parquet's column cases are ignored
+        val caseInsensitiveSchema = new StructType().add("camelcase", 
StringType)
+        checkAnswer(spark.read.schema(caseInsensitiveSchema).parquet(path), 
Row("A"))
+      }}
+    }
+  }
+  test("SPARK-31116: Select simple parquet with case insensitive and schema 
pruning disabled") {
+    withSQLConf(
+      SQLConf.CASE_SENSITIVE.key -> "false",
+      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "false") {
+      withTempPath { dir => {
+        val path = dir.getCanonicalPath
+
+        // Prepare values for testing specific parquet record reader
+        Seq("A").toDF("camelCase").write.parquet(path)
+
+        val exactSchema = new StructType().add("camelCase", StringType)
+        checkAnswer(spark.read.schema(exactSchema).parquet(path), Row("A"))
+
+        // In case insensitive manner, parquet's column cases are ignored
+        val caseInsensitiveSchema = new StructType().add("camelcase", 
StringType)
+        checkAnswer(spark.read.schema(caseInsensitiveSchema).parquet(path), 
Row("A"))
+      }}
+    }
+  }
+
+  test("SPARK-31116: Select nested parquet with case insensitive and schema 
pruning enabled") {
+    withSQLConf(
+      SQLConf.CASE_SENSITIVE.key -> "false",
+      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "true") {
+      withTempPath { dir => {
+        val path = dir.getCanonicalPath
+
+        // Prepare values for testing nested parquet data
+        spark
+          .range(1L)
+          .selectExpr("NAMED_STRUCT('lowercase', id, 'camelCase', id + 1) AS 
StructColumn")
+          .write.parquet(path)
+
+        val exactSchema = new StructType()
+          .add(
+            "StructColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelCase", LongType))
+        checkAnswer(spark.read.schema(exactSchema).parquet(path), Row(Row(0, 
1)))
+
+        // In case insensitive manner, parquet's column cases are ignored
+        val innerColumnCaseInsensitiveSchema = new StructType()
+          .add(
+            "StructColumn",
+            new StructType()
+              .add("LowerCase", LongType)
+              .add("camelcase", LongType))
+        checkAnswer(
+          spark.read.schema(innerColumnCaseInsensitiveSchema).parquet(path),
+          Row(Row(0, 1)))
+
+        val rootColumnCaseInsensitiveSchema = new StructType()
+          .add(
+            "structColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelCase", LongType))
+        checkAnswer(
+          spark.read.schema(rootColumnCaseInsensitiveSchema).parquet(path),
+          Row(Row(0, 1)))
+      }}
+    }
+  }
+
+  test("SPARK-31116: Select nested parquet with case insensitive and schema 
pruning disabled") {
+    withSQLConf(
+      SQLConf.CASE_SENSITIVE.key -> "false",
+      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "false") {
+      withTempPath { dir => {
+        val path = dir.getCanonicalPath
+
+        // Prepare values for testing nested parquet data
+        spark
+          .range(1L)
+          .selectExpr("NAMED_STRUCT('lowercase', id, 'camelCase', id + 1) AS 
StructColumn")
+          .write.parquet(path)
+
+        val exactSchema = new StructType()
+          .add(
+            "StructColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelCase", LongType))
+        checkAnswer(spark.read.schema(exactSchema).parquet(path), Row(Row(0, 
1)))
+
+        // In case insensitive manner, parquet's column cases are ignored
+        val innerColumnCaseInsensitiveSchema = new StructType()
+          .add(
+            "StructColumn",
+            new StructType()
+              .add("LowerCase", LongType)
+              .add("camelcase", LongType))
+        checkAnswer(
+          spark.read.schema(innerColumnCaseInsensitiveSchema).parquet(path),
+          Row(Row(0, 1)))
+
+        val rootColumnCaseInsensitiveSchema = new StructType()
+          .add(
+            "structColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelCase", LongType))
+        checkAnswer(
+          spark.read.schema(rootColumnCaseInsensitiveSchema).parquet(path),
+          Row(Row(0, 1)))
+      }}
+    }
+  }
+
+  test("SPARK-31116: Select simple parquet with case sensitive and schema 
pruning enabled") {
+    withSQLConf(
+      SQLConf.CASE_SENSITIVE.key -> "true",
+      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "true") {
+      withTempPath { dir => {
+        val path = dir.getCanonicalPath
+
+        // Prepare values for testing specific parquet record reader
+        Seq("A").toDF("camelCase").write.parquet(path)
+
+        val exactSchema = new StructType().add("camelCase", StringType)
+        checkAnswer(spark.read.schema(exactSchema).parquet(path), Row("A"))
+
+        // In case sensitive manner, different letter case does not read column
+        val caseInsensitiveSchema = new StructType().add("camelcase", 
StringType)
+        checkAnswer(spark.read.schema(caseInsensitiveSchema).parquet(path), 
Row(null))
+
+        // It also properly work in combined schema
+        val combinedSchema = new StructType()
+          .add("camelCase", StringType)
+          .add("camelcase", StringType)
+        checkAnswer(spark.read.schema(combinedSchema).parquet(path), Row("A", 
null))
+      }}
+    }
+  }
+
+  test("SPARK-31116: Select simple parquet with case sensitive and schema 
pruning disabled") {
+    withSQLConf(
+      SQLConf.CASE_SENSITIVE.key -> "true",
+      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "false") {
+      withTempPath { dir => {
+        val path = dir.getCanonicalPath
+
+        // Prepare values for testing specific parquet record reader
+        Seq("A").toDF("camelCase").write.parquet(path)
+
+        val exactSchema = new StructType().add("camelCase", StringType)
+        checkAnswer(spark.read.schema(exactSchema).parquet(path), Row("A"))
+
+        // In case sensitive manner, different letter case does not read column
+        val caseInsensitiveSchema = new StructType().add("camelcase", 
StringType)
+        checkAnswer(spark.read.schema(caseInsensitiveSchema).parquet(path), 
Row(null))
+
+        // It also properly work in combined schema
+        val combinedSchema = new StructType()
+          .add("camelCase", StringType)
+          .add("camelcase", StringType)
+        checkAnswer(spark.read.schema(combinedSchema).parquet(path), Row("A", 
null))
+      }}
+    }
+  }
+
+  test("SPARK-31116: Select nested parquet with case sensitive and schema 
pruning enabled") {
+    withSQLConf(
+      SQLConf.CASE_SENSITIVE.key -> "true",
+      SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key -> "true") {
+      withTempPath { dir => {
+        val path = dir.getCanonicalPath
+
+        // Prepare values for testing nested parquet data
+        spark
+          .range(1)
+          .selectExpr("NAMED_STRUCT('lowercase', id, 'camelCase', id + 1) AS 
StructColumn")
+          .write.parquet(path)
+
+        val exactSchema = new StructType()
+          .add(
+            "StructColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelCase", LongType))
+        checkAnswer(spark.read.schema(exactSchema).parquet(path), Row(Row(0, 
1)))
+
+        val innerColumnCaseInsensitiveSchema = new StructType()
+          .add(
+            "StructColumn",
+            new StructType()
+              .add("LowerCase", LongType)
+              .add("camelcase", LongType))
+        checkAnswer(
+          spark.read.schema(innerColumnCaseInsensitiveSchema).parquet(path),
+          Row(null))
+
+        val innerPartialColumnCaseInsensitiveSchema = new StructType()
+          .add(
+            "StructColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelcase", LongType))
+        checkAnswer(
+          
spark.read.schema(innerPartialColumnCaseInsensitiveSchema).parquet(path),
+          Row(Row(0, null)))
+
+        val rootColumnCaseInsensitiveSchema = new StructType()
+          .add(
+            "structColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelCase", LongType))
+        checkAnswer(
+          spark.read.schema(rootColumnCaseInsensitiveSchema).parquet(path),
+          Row(null))
+
+        val combinedSchema = new StructType()
+          .add(
+            "StructColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelCase", LongType)
+              .add("LowerCase", LongType)
+              .add("camelcase", LongType))
+          .add(
+            "structColumn",
+            new StructType()
+              .add("lowercase", LongType)
+              .add("camelCase", LongType)
+              .add("LowerCase", LongType)
+              .add("camelcase", LongType))
+        checkAnswer(
+          spark.read.schema(combinedSchema).parquet(path),
+          Row(Row(0, 1, null, null), null))
+      }}
+    }
+  }
+
+  test("SPARK-31116: Select nested parquet with case sensitive and schema 
pruning disabled") {
 
 Review comment:
   Please merge this test into the one above — e.g., register both variants from a
   single loop over the nested-schema-pruning settings — so the duplicated test body
   is written only once.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to