HyukjinKwon commented on a change in pull request #27888: [SPARK-31116][SQL] 
Fix nested schema case-sensitivity in ParquetRowConverter
URL: https://github.com/apache/spark/pull/27888#discussion_r392789070
 
 

 ##########
 File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRowConverter.scala
 ##########
 @@ -176,15 +178,38 @@ private[parquet] class ParquetRowConverter(
    */
   def currentRecord: InternalRow = currentRow
 
+
   // Converters for each field.
   private[this] val fieldConverters: Array[Converter with 
HasParentContainerUpdater] = {
-    parquetType.getFields.asScala.map { parquetField =>
-      val fieldIndex = catalystType.fieldIndex(parquetField.getName)
-      val catalystField = catalystType(fieldIndex)
-      // Converted field value should be set to the `fieldIndex`-th cell of 
`currentRow`
-      newConverter(parquetField, catalystField.dataType, new 
RowUpdater(currentRow, fieldIndex))
-    }.toArray
-  }
+
+    // (SPARK-31116) There is an issue when schema pruning is enabled, so we 
keep original codes
+    if (schemaPruning) {
+      // (SPARK-31116) For letter case issue, create name to field index based 
on case sensitivity
+      val catalystFieldNameToIndex = if (caseSensitive) {
+        catalystType.fieldNames.zipWithIndex.toMap
+      } else {
+        CaseInsensitiveMap(catalystType.fieldNames.zipWithIndex.toMap)
+      }
+      parquetType.getFields.asScala.map { parquetField =>
+        val fieldIndex = 
catalystFieldNameToIndex.getOrElse(parquetField.getName,
+          throw new IllegalArgumentException(
+            s"${parquetField.getName} does not exist. " +
+              s"Available: ${catalystType.fieldNames.mkString(", ")}")
+        )
+        val catalystField = catalystType(fieldIndex)
+        // Converted field value should be set to the `fieldIndex`-th cell of 
`currentRow`
+        newConverter(parquetField, catalystField.dataType, new 
RowUpdater(currentRow, fieldIndex))
+      }.toArray
+    } else {
+      parquetType.getFields.asScala.zip(catalystType).zipWithIndex.map {
+        case ((parquetFieldType, catalystField), ordinal) =>
+          // Converted field value should be set to the `ordinal`-th cell of 
`currentRow`
+          newConverter(
+            parquetFieldType, catalystField.dataType, new 
RowUpdater(currentRow, ordinal))
+      }.toArray
+    }
 
 Review comment:
   I actually asked to keep the original code as it was at 
https://github.com/apache/spark/pull/27888#discussion_r391979749, although it 
apparently works the same.
   I am okay with removing this branch if we're very sure it works identically.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to