yihua commented on code in PR #14003:
URL: https://github.com/apache/hudi/pull/14003#discussion_r2393145271
##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelationV2.scala:
##########
@@ -286,16 +293,57 @@ class IncrementalRelationV2(val sqlContext: SQLContext,
}
}
- private def fullTableScanDataFrame(commitsToFilter: List[HoodieInstant]): DataFrame = {
+ private def fullTableScanDataFrame(commitsToFilter: List[HoodieInstant],
+                                    broadcastTimeMap: org.apache.spark.broadcast.Broadcast[Map[String, String]]): DataFrame = {
val commitTimesToFilter = commitsToFilter.map(_.requestedTime)
val hudiDF = sqlContext.read
.format("hudi_v1")
- .schema(usedSchema)
+ .schema(schema)
.load(basePath.toString)
.filter(col(HoodieRecord.COMMIT_TIME_METADATA_FIELD).isin(commitTimesToFilter: _*))
- // schema enforcement does not happen in above spark.read with hudi. hence selecting explicitly w/ right column order
- val fieldNames = usedSchema.fieldNames
- hudiDF.select(fieldNames.head, fieldNames.tail: _*)
+ val fieldNames = schema.fieldNames
+ val selectedDf = hudiDF.select(fieldNames.head, fieldNames.tail: _*)
+ val transformedRDD = addCompletionTimeColumn(selectedDf.rdd, broadcastTimeMap)
+ sqlContext.createDataFrame(transformedRDD, schema)
Review Comment:
Discussed offline that we don't have to support the completion time in the
relation classes such as `MergeOnReadIncrementalRelationV2`, which are not using
the file group reader and are going to be deprecated. Let's focus on the reader
paths where the file group reader is enabled.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]