alexeykudinkin commented on a change in pull request #4709:
URL: https://github.com/apache/hudi/pull/4709#discussion_r804908249



##########
File path: 
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieMergeOnReadRDD.scala
##########
@@ -61,26 +64,31 @@ class HoodieMergeOnReadRDD(@transient sc: SparkContext,
   } else {
     new Properties()
   }
+
+  private val requiredSchema = tableState.requiredStructSchema
+
+  private val requiredFieldPosition = requiredSchema.map(f => 
tableState.tableStructSchema.fieldIndex(f.name))
+
   override def compute(split: Partition, context: TaskContext): 
Iterator[InternalRow] = {
     val mergeOnReadPartition = split.asInstanceOf[HoodieMergeOnReadPartition]
     val iter = mergeOnReadPartition.split match {
       case dataFileOnlySplit if dataFileOnlySplit.logPaths.isEmpty =>
-        val rows = read(dataFileOnlySplit.dataFile.get, 
requiredSchemaFileReader)
-        extractRequiredSchema(rows)
+        val rows = readParquetFile(dataFileOnlySplit.dataFile.get, 
requiredSchemaFileReader)
+        extractRequiredSchema(rows, requiredSchema, requiredFieldPosition)

Review comment:
       Discussed offline; I will pick this up as part of addressing HUDI-3396.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to