alexeykudinkin commented on code in PR #7528:
URL: https://github.com/apache/hudi/pull/7528#discussion_r1083261673
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/TestNestedSchemaPruningOptimization.scala:
##########
@@ -113,6 +113,9 @@ class TestNestedSchemaPruningOptimization extends
HoodieSparkSqlTestBase with Sp
//assertEquals(tableName, tableIdentifier.get.table)
//assertEquals(expectedSchema, requiredSchema, hint)
}
+
+ // Execute the query to make sure it's working as expected (smoke test)
+ selectDF.count
Review Comment:
Correct
##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/BaseFileOnlyRelation.scala:
##########
@@ -106,18 +112,16 @@ class BaseFileOnlyRelation(sqlContext: SQLContext,
}
protected def collectFileSplits(partitionFilters: Seq[Expression], dataFilters: Seq[Expression]): Seq[HoodieBaseFileSplit] = {
- val partitions = listLatestBaseFiles(globPaths, partitionFilters, dataFilters)
- val fileSplits = partitions.values.toSeq
- .flatMap { files =>
- files.flatMap { file =>
- // TODO fix, currently assuming parquet as underlying format
- HoodieDataSourceHelper.splitFiles(
- sparkSession = sparkSession,
- file = file,
- partitionValues = getPartitionColumnsAsInternalRow(file)
- )
- }
- }
+ val fileSlices = listLatestFileSlices(globPaths, partitionFilters, dataFilters)
Review Comment:
We do have coverage for this scenario
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]