yihua commented on code in PR #10167:
URL: https://github.com/apache/hudi/pull/10167#discussion_r1410093495
##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/HoodieFileGroupReaderBasedParquetFileFormat.scala:
##########
@@ -300,12 +303,13 @@ class HoodieFileGroupReaderBasedParquetFileFormat(tableState: HoodieTableState,
     val baseFileReader = super.buildReaderWithPartitionValues(sparkSession, dataSchema, partitionSchema, requiredSchema,
       filters ++ requiredFilters, options, new Configuration(hadoopConf))
-    //file reader for reading a hudi base file that needs to be merged with log files
+    // File reader for reading a Hoodie base file that needs to be merged with log files
     val preMergeBaseFileReader = if (isMOR) {
       // Add support for reading files using inline file system.
-      super.buildReaderWithPartitionValues(sparkSession, dataSchema, partitionSchema, requiredSchemaWithMandatory,
-        if (shouldUseRecordPosition) requiredFilters else recordKeyRelatedFilters ++ requiredFilters,
-        options, new Configuration(hadoopConf))
+      val appliedRequiredSchema = sparkAdapter.appendRowIndexColumnForParquetFileReader(requiredSchemaWithMandatory, shouldUseRecordPosition)
Review Comment:
Could you add a test to validate the logic?
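
For context, a minimal sketch of what appending a row-index column to the read schema could look like. The column name `_tmp_metadata_row_index` and the helper `appendRowIndexColumn` below are placeholders for illustration only, not the actual `sparkAdapter.appendRowIndexColumnForParquetFileReader` implementation:

```scala
import org.apache.spark.sql.types.{LongType, StructField, StructType}

object RowIndexSchemaSketch {
  // Hypothetical name for the temporary row-index metadata column; the real
  // constant used by the Spark adapter may differ.
  val RowIndexColumnName = "_tmp_metadata_row_index"

  // Append a long-typed row-index field to the read schema only when record
  // positions should be used for merging; otherwise return the schema unchanged.
  def appendRowIndexColumn(requiredSchema: StructType,
                           shouldUseRecordPosition: Boolean): StructType = {
    if (shouldUseRecordPosition && !requiredSchema.fieldNames.contains(RowIndexColumnName)) {
      StructType(requiredSchema.fields :+ StructField(RowIndexColumnName, LongType, nullable = false))
    } else {
      requiredSchema
    }
  }
}
```

A unit test along the lines requested above could assert that the returned schema gains exactly one LongType field when `shouldUseRecordPosition` is true and is returned unchanged otherwise.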