vamshikrishnakyatham commented on code in PR #13943:
URL: https://github.com/apache/hudi/pull/13943#discussion_r2365406149


##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/HoodieFileGroupReaderBasedFileFormat.scala:
##########
@@ -365,7 +365,16 @@ class HoodieFileGroupReaderBasedFileFormat(tablePath: String,
                            storageConf: StorageConfiguration[Configuration]): Iterator[InternalRow] = {
     if (remainingPartitionSchema.fields.length == partitionSchema.fields.length) {
       //none of partition fields are read from the file, so the reader will do the appending for us
-      parquetFileReader.read(file, requiredSchema, partitionSchema, internalSchemaOpt, filters, storageConf)
+      val pfileUtils = sparkAdapter.getSparkPartitionedFileUtils
+      try {
+        val modifiedFile = pfileUtils.createPartitionedFile(file.partitionValues, pfileUtils.getPathFromPartitionedFile(file), file.start, file.length)
+        parquetFileReader.read(modifiedFile, requiredSchema, partitionSchema, internalSchemaOpt, filters, storageConf)
+      } catch {
+        case _: ClassCastException =>
+          val typedPartitionValues = ensurePartitionValuesTyped(file.partitionValues, partitionSchema)
+          val modifiedFile = pfileUtils.createPartitionedFile(typedPartitionValues, pfileUtils.getPathFromPartitionedFile(file), file.start, file.length)
+          parquetFileReader.read(modifiedFile, requiredSchema, partitionSchema, internalSchemaOpt, filters, storageConf)
+      }

Review Comment:
   Oh damn, I missed that, thanks. Added it to the else branches as well.
   
   As of now, this is purely internal typecasting, but we can add logic along the lines of the example you provided.
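   
   For context on the fallback path: the hunk calls an `ensurePartitionValuesTyped` helper whose body isn't shown here. Below is only a minimal sketch of what such a conversion could look like, assuming the incoming partition row is a `GenericInternalRow` carrying `UTF8String` values (date/timestamp partition columns would additionally need a `timeZoneId` on the `Cast`); the actual helper in this PR may differ:
   
   ```scala
   import org.apache.spark.sql.catalyst.InternalRow
   import org.apache.spark.sql.catalyst.expressions.{Cast, Literal}
   import org.apache.spark.sql.types.{StringType, StructType}
   import org.apache.spark.unsafe.types.UTF8String
   
   // Hypothetical sketch: partition values that arrive as UTF8String are cast to
   // the Catalyst type declared in partitionSchema; already-typed values pass through.
   def ensurePartitionValuesTyped(partitionValues: InternalRow,
                                  partitionSchema: StructType): InternalRow = {
     val typed = partitionSchema.fields.zipWithIndex.map { case (field, idx) =>
       if (partitionValues.isNullAt(idx)) {
         null
       } else {
         partitionValues.get(idx, field.dataType) match {
           case s: UTF8String if field.dataType != StringType =>
             // Evaluate a constant Cast from the string form to the declared partition type.
             Cast(Literal(s, StringType), field.dataType).eval()
           case v => v // already typed; keep as-is
         }
       }
     }
     InternalRow.fromSeq(typed)
   }
   ```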


