yihua commented on code in PR #12161:
URL: https://github.com/apache/hudi/pull/12161#discussion_r1857227393
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestStructuredStreaming.scala:
##########
@@ -498,15 +498,77 @@ class TestStructuredStreaming extends HoodieSparkClientTestBase {
val inputDF1 = spark.read.json(spark.sparkContext.parallelize(records1, 2))
inputDF1.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
val opts = commonOpts + (DataSourceWriteOptions.TABLE_TYPE.key -> HoodieTableType.MERGE_ON_READ.name()) +
  (DataSourceWriteOptions.STREAMING_DISABLE_COMPACTION.key -> "true")
- streamingWrite(inputDF1.schema, sourcePath, destPath, opts, "000")
+ streamingWrite(inputDF1.schema, sourcePath, destPath, opts)
for (i <- 1 to 24) {
val id = String.format("%03d", new Integer(i))
val records = recordsToStrings(dataGen.generateUpdates(id, 10)).asScala.toList
val inputDF = spark.read.json(spark.sparkContext.parallelize(records, 2))
inputDF.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
- streamingWrite(inputDF.schema, sourcePath, destPath, opts, id)
+ streamingWrite(inputDF.schema, sourcePath, destPath, opts)
}
- val metaClient = HoodieTestUtils.createMetaClient(storage, destPath);
+ val metaClient = HoodieTestUtils.createMetaClient(storage, destPath)
assertTrue(metaClient.getActiveTimeline.getCommitAndReplaceTimeline.empty())
+ assertEquals(25, metaClient.getActiveTimeline.countInstants())
+ }
+
+ @ParameterizedTest
+ @CsvSource(Array(
+ "COPY_ON_WRITE,EVENT_TIME_ORDERING",
+ "MERGE_ON_READ,EVENT_TIME_ORDERING",
+ "COPY_ON_WRITE,COMMIT_TIME_ORDERING",
+ "MERGE_ON_READ,COMMIT_TIME_ORDERING",
+ "COPY_ON_WRITE,CUSTOM",
+ "MERGE_ON_READ,CUSTOM"))
+ def testStructuredStreamingWithMergeMode(tableType: String, mergerType: String): Unit = {
+ val (sourcePath, destPath) = initStreamingSourceAndDestPath("source", "dest")
+ // First chunk of data
+ val records1 = recordsToStrings(dataGen.generateInserts("000", 10)).asScala.toList
+ val inputDF1 = spark.read.json(spark.sparkContext.parallelize(records1, 2))
+ inputDF1.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
+ var opts = commonOpts ++ Map(DataSourceWriteOptions.OPERATION.key -> UPSERT_OPERATION_OPT_VAL,
+ DataSourceWriteOptions.TABLE_TYPE.key() -> tableType,
+ DataSourceWriteOptions.RECORD_MERGE_MODE.key() -> mergerType,
+ HoodieStorageConfig.LOGFILE_DATA_BLOCK_FORMAT.key() -> "parquet",
+ DataSourceWriteOptions.PRECOMBINE_FIELD.key -> "weight")
+ if (mergerType == "CUSTOM") {
+ opts = opts ++ Map(DataSourceWriteOptions.RECORD_MERGE_STRATEGY_ID.key() -> HoodieSparkDeleteRecordMerger.DELETE_MERGER_STRATEGY,
+ DataSourceWriteOptions.RECORD_MERGE_IMPL_CLASSES.key() -> classOf[HoodieSparkDeleteRecordMerger].getName)
+ }
+ streamingWrite(inputDF1.schema, sourcePath, destPath, opts)
+
Review Comment:
```suggestion
```
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestStructuredStreaming.scala:
##########
@@ -498,15 +498,77 @@ class TestStructuredStreaming extends HoodieSparkClientTestBase {
val inputDF1 = spark.read.json(spark.sparkContext.parallelize(records1, 2))
inputDF1.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
val opts = commonOpts + (DataSourceWriteOptions.TABLE_TYPE.key -> HoodieTableType.MERGE_ON_READ.name()) +
  (DataSourceWriteOptions.STREAMING_DISABLE_COMPACTION.key -> "true")
- streamingWrite(inputDF1.schema, sourcePath, destPath, opts, "000")
+ streamingWrite(inputDF1.schema, sourcePath, destPath, opts)
for (i <- 1 to 24) {
val id = String.format("%03d", new Integer(i))
val records = recordsToStrings(dataGen.generateUpdates(id, 10)).asScala.toList
val inputDF = spark.read.json(spark.sparkContext.parallelize(records, 2))
inputDF.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
- streamingWrite(inputDF.schema, sourcePath, destPath, opts, id)
+ streamingWrite(inputDF.schema, sourcePath, destPath, opts)
}
- val metaClient = HoodieTestUtils.createMetaClient(storage, destPath);
+ val metaClient = HoodieTestUtils.createMetaClient(storage, destPath)
assertTrue(metaClient.getActiveTimeline.getCommitAndReplaceTimeline.empty())
+ assertEquals(25, metaClient.getActiveTimeline.countInstants())
+ }
+
+ @ParameterizedTest
+ @CsvSource(Array(
+ "COPY_ON_WRITE,EVENT_TIME_ORDERING",
+ "MERGE_ON_READ,EVENT_TIME_ORDERING",
+ "COPY_ON_WRITE,COMMIT_TIME_ORDERING",
+ "MERGE_ON_READ,COMMIT_TIME_ORDERING",
+ "COPY_ON_WRITE,CUSTOM",
+ "MERGE_ON_READ,CUSTOM"))
+ def testStructuredStreamingWithMergeMode(tableType: String, mergerType: String): Unit = {
+ val (sourcePath, destPath) = initStreamingSourceAndDestPath("source", "dest")
+ // First chunk of data
+ val records1 = recordsToStrings(dataGen.generateInserts("000", 10)).asScala.toList
+ val inputDF1 = spark.read.json(spark.sparkContext.parallelize(records1, 2))
+ inputDF1.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
+ var opts = commonOpts ++ Map(DataSourceWriteOptions.OPERATION.key -> UPSERT_OPERATION_OPT_VAL,
+ DataSourceWriteOptions.TABLE_TYPE.key() -> tableType,
+ DataSourceWriteOptions.RECORD_MERGE_MODE.key() -> mergerType,
+ HoodieStorageConfig.LOGFILE_DATA_BLOCK_FORMAT.key() -> "parquet",
+ DataSourceWriteOptions.PRECOMBINE_FIELD.key -> "weight")
+ if (mergerType == "CUSTOM") {
+ opts = opts ++ Map(DataSourceWriteOptions.RECORD_MERGE_STRATEGY_ID.key() -> HoodieSparkDeleteRecordMerger.DELETE_MERGER_STRATEGY,
+ DataSourceWriteOptions.RECORD_MERGE_IMPL_CLASSES.key() -> classOf[HoodieSparkDeleteRecordMerger].getName)
+ }
+ streamingWrite(inputDF1.schema, sourcePath, destPath, opts)
+
+
+ val records2 = recordsToStrings(dataGen.generateUniqueUpdates("001", 5)).asScala.toList
Review Comment:
Could we explicitly generate update records with both lower and higher `weight` values, so that the results differ between the `EVENT_TIME_ORDERING` and `COMMIT_TIME_ORDERING` merge modes?
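For illustration, a minimal sketch of one way to do that, reusing `inputDF1` and `opts` from the hunk above (the ±100.0 shift and the write order are arbitrary assumptions for the sketch, not what the PR currently does):
```scala
// Sketch only: derive one update batch with higher weights and one with
// lower weights from the initial inserts; "weight" is the precombine field
// configured in opts above.
import org.apache.spark.sql.functions.{col, lit}

val higherWeight = inputDF1.withColumn("weight", col("weight") + lit(100.0))
val lowerWeight = inputDF1.withColumn("weight", col("weight") - lit(100.0))

// Commit the higher-weight updates first, then the lower-weight ones:
// EVENT_TIME_ORDERING should keep the higher-weight records, while
// COMMIT_TIME_ORDERING should keep the lower-weight, latest-committed ones.
higherWeight.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
streamingWrite(higherWeight.schema, sourcePath, destPath, opts)
lowerWeight.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
streamingWrite(lowerWeight.schema, sourcePath, destPath, opts)
```
Asserting on `weight` after each run would then distinguish the two orderings.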
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestStructuredStreaming.scala:
##########
@@ -498,15 +498,77 @@ class TestStructuredStreaming extends HoodieSparkClientTestBase {
val inputDF1 = spark.read.json(spark.sparkContext.parallelize(records1, 2))
inputDF1.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
val opts = commonOpts + (DataSourceWriteOptions.TABLE_TYPE.key -> HoodieTableType.MERGE_ON_READ.name()) +
  (DataSourceWriteOptions.STREAMING_DISABLE_COMPACTION.key -> "true")
- streamingWrite(inputDF1.schema, sourcePath, destPath, opts, "000")
+ streamingWrite(inputDF1.schema, sourcePath, destPath, opts)
for (i <- 1 to 24) {
val id = String.format("%03d", new Integer(i))
val records = recordsToStrings(dataGen.generateUpdates(id, 10)).asScala.toList
val inputDF = spark.read.json(spark.sparkContext.parallelize(records, 2))
inputDF.coalesce(1).write.mode(SaveMode.Append).json(sourcePath)
- streamingWrite(inputDF.schema, sourcePath, destPath, opts, id)
+ streamingWrite(inputDF.schema, sourcePath, destPath, opts)
}
- val metaClient = HoodieTestUtils.createMetaClient(storage, destPath);
+ val metaClient = HoodieTestUtils.createMetaClient(storage, destPath)
assertTrue(metaClient.getActiveTimeline.getCommitAndReplaceTimeline.empty())
+ assertEquals(25, metaClient.getActiveTimeline.countInstants())
+ }
+
+ @ParameterizedTest
+ @CsvSource(Array(
+ "COPY_ON_WRITE,EVENT_TIME_ORDERING",
+ "MERGE_ON_READ,EVENT_TIME_ORDERING",
+ "COPY_ON_WRITE,COMMIT_TIME_ORDERING",
+ "MERGE_ON_READ,COMMIT_TIME_ORDERING",
+ "COPY_ON_WRITE,CUSTOM",
+ "MERGE_ON_READ,CUSTOM"))
+ def testStructuredStreamingWithMergeMode(tableType: String, mergerType: String): Unit = {
Review Comment:
```suggestion
def testStructuredStreamingWithMergeMode(tableType: String, mergerMode: String): Unit = {
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.