nsivabalan commented on code in PR #13229:
URL: https://github.com/apache/hudi/pull/13229#discussion_r2064994049


##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieClientOnCopyOnWriteStorage.java:
##########
@@ -685,16 +686,27 @@ public void testDeletesForInsertsInSameBatch() throws 
Exception {
     super.testDeletesForInsertsInSameBatch(INSTANT_GENERATOR);
   }
 
+  private Pair<JavaRDD<WriteStatus>, List<HoodieRecord>> 
insertBatchRecords(SparkRDDWriteClient client, String commitTime,
+                                                                            
Integer recordNum, int expectStatusSize, int numSlices,
+                                                                            
Function3<JavaRDD<WriteStatus>, SparkRDDWriteClient, JavaRDD<HoodieRecord>, 
String> writeFn) throws IOException {
+    return insertBatchRecords(client, commitTime, recordNum, expectStatusSize, 
numSlices, writeFn, false);
+  }
+
   private Pair<JavaRDD<WriteStatus>, List<HoodieRecord>> 
insertBatchRecords(SparkRDDWriteClient client, String commitTime,
                                                                          
Integer recordNum, int expectStatusSize, int numSlices,
-                                                                         
Function3<JavaRDD<WriteStatus>, SparkRDDWriteClient, JavaRDD<HoodieRecord>, 
String> writeFn) throws IOException {
+                                                                         
Function3<JavaRDD<WriteStatus>, SparkRDDWriteClient, JavaRDD<HoodieRecord>, 
String> writeFn,
+                                                                              
boolean leaveInflightCommit) throws IOException {

Review Comment:
   Let's rename this to "skipCommit".



##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestMORDataSourceStorage.scala:
##########
@@ -190,6 +190,11 @@ class TestMORDataSourceStorage extends 
SparkClientFunctionalTestHarness {
     // compaction should have been completed
     val metaClient = HoodieTestUtils.createMetaClient(new 
HadoopStorageConfiguration(fs.getConf), basePath)
     assertEquals(1, 
metaClient.getActiveTimeline.getCommitAndReplaceTimeline.countInstants())
+
+    val hudiDF2 = 
spark.read.format("org.apache.hudi").option("hoodie.metadata.enable","true")

Review Comment:
   Can we delete this line?



##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestMORDataSourceStorage.scala:
##########
@@ -166,7 +166,7 @@ class TestMORDataSourceStorage extends 
SparkClientFunctionalTestHarness {
     val inputDF1: Dataset[Row] = 
spark.read.json(spark.sparkContext.parallelize(records1, 2))
     inputDF1.write.format("org.apache.hudi")
       .options(options)
-      .option(DataSourceWriteOptions.OPERATION.key, 
DataSourceWriteOptions.INSERT_OPERATION_OPT_VAL)
+      .option(DataSourceWriteOptions.OPERATION.key, 
DataSourceWriteOptions.UPSERT_OPERATION_OPT_VAL)

Review Comment:
   Also, can we revert these changes if they are not required?



##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/SparkClientFunctionalTestHarness.java:
##########
@@ -332,9 +330,9 @@ protected void 
updateRecordsInMORTable(HoodieTableMetaClient metaClient, List<Ho
     List<WriteStatus> statuses = statusesRdd.collect();
     // Verify there are no errors
     assertNoWriteErrors(statuses);
-    if (doExplicitCommit) {
-      client.commit(commitTime, statusesRdd);
-    }
+    //if (doExplicitCommit) {

Review Comment:
   Let's clean this up — please remove the commented-out code instead of leaving it in place.



##########
hudi-client/hudi-client-common/src/test/java/org/apache/hudi/utils/HoodieWriterClientTestHarness.java:
##########
@@ -297,17 +300,27 @@ protected Object castWriteBatch(BaseHoodieWriteClient 
client, String newCommitTi
                                   Function2<List<HoodieRecord>, String, 
Integer> recordGenFunction,
                                   Function3<Object, BaseHoodieWriteClient, 
Object, String> writeFn,
                                   boolean assertForCommit, int 
expRecordsInThisCommit, int expTotalRecords,
-                                  int expTotalCommits, boolean doCommit, 
InstantGenerator instantGenerator) throws Exception {
+                                  int expTotalCommits, InstantGenerator 
instantGenerator) throws Exception {
     return castWriteBatch(client, newCommitTime, prevCommitTime, 
commitTimesBetweenPrevAndNew, initCommitTime, numRecordsInThisCommit, 
recordGenFunction,
-        writeFn, assertForCommit, expRecordsInThisCommit, expTotalRecords, 
expTotalCommits, doCommit, true, instantGenerator);
+        writeFn, assertForCommit, expRecordsInThisCommit, expTotalRecords, 
expTotalCommits, true, instantGenerator);
   }
 
   protected Object castWriteBatch(BaseHoodieWriteClient client, String 
newCommitTime, String prevCommitTime,
                                   Option<List<String>> 
commitTimesBetweenPrevAndNew, String initCommitTime, int numRecordsInThisCommit,
                                   Function2<List<HoodieRecord>, String, 
Integer> recordGenFunction,
                                   Function3<Object, BaseHoodieWriteClient, 
Object, String> writeFn,
-                                  boolean assertForCommit, int 
expRecordsInThisCommit, int expTotalRecords, int expTotalCommits, boolean 
doCommit,
+                                  boolean assertForCommit, int 
expRecordsInThisCommit, int expTotalRecords, int expTotalCommits,
                                   boolean filterForCommitTimeWithAssert, 
InstantGenerator instantGenerator) throws Exception {
+    return castWriteBatch(client, newCommitTime, prevCommitTime, 
commitTimesBetweenPrevAndNew, initCommitTime, numRecordsInThisCommit, 
recordGenFunction,
+        writeFn, assertForCommit, expRecordsInThisCommit, expTotalRecords, 
expTotalCommits, filterForCommitTimeWithAssert, instantGenerator, false);
+  }
+
+  protected Object castWriteBatch(BaseHoodieWriteClient client, String 
newCommitTime, String prevCommitTime,
+                                  Option<List<String>> 
commitTimesBetweenPrevAndNew, String initCommitTime, int numRecordsInThisCommit,
+                                  Function2<List<HoodieRecord>, String, 
Integer> recordGenFunction,
+                                  Function3<Object, BaseHoodieWriteClient, 
Object, String> writeFn,
+                                  boolean assertForCommit, int 
expRecordsInThisCommit, int expTotalRecords, int expTotalCommits,
+                                  boolean filterForCommitTimeWithAssert, 
InstantGenerator instantGenerator, boolean leaveInflightCommit) throws 
Exception {

Review Comment:
   Let's rename leaveInflightCommit to "skipCommit" to standardize the name everywhere.



##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestCOWDataSourceStorage.scala:
##########
@@ -99,7 +99,7 @@ class TestCOWDataSourceStorage extends 
SparkClientFunctionalTestHarness {
     val inputDF0 = spark.read.json(spark.sparkContext.parallelize(records0, 2))
     inputDF0.write.format("org.apache.hudi")
       .options(options)
-      .option(DataSourceWriteOptions.OPERATION.key, 
DataSourceWriteOptions.INSERT_OPERATION_OPT_VAL)
+      .option(DataSourceWriteOptions.OPERATION.key, 
DataSourceWriteOptions.UPSERT_OPERATION_OPT_VAL)

Review Comment:
   Can we revert this change if it is not required?



##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/feature/index/TestExpressionIndex.scala:
##########
@@ -2187,7 +2187,7 @@ class TestExpressionIndex extends HoodieSparkSqlTestBase {
       HoodieExpressionIndex.DYNAMIC_BLOOM_MAX_ENTRIES -> "1000"
     )
     val bloomFilterRecords = 
SparkMetadataWriterUtils.getExpressionIndexRecordsUsingBloomFilter(df, "c5",
-      HoodieWriteConfig.newBuilder().withPath("a/b").build(), "",
+      
HoodieWriteConfig.newBuilder().withPath("a/b").withAutoCommit(false).build(), 
"",

Review Comment:
   Please remove this unrelated change.



##########
hudi-client/hudi-java-client/src/test/java/org/apache/hudi/client/functional/TestHoodieJavaClientOnCopyOnWriteStorage.java:
##########
@@ -111,11 +111,11 @@ protected Object castWriteBatch(BaseHoodieWriteClient 
client, String newCommitTi
                                   Option<List<String>> 
commitTimesBetweenPrevAndNew, String initCommitTime, int numRecordsInThisCommit,
                                   Function2<List<HoodieRecord>, String, 
Integer> recordGenFunction,
                                   Function3<Object, BaseHoodieWriteClient, 
Object, String> writeFn,
-                                  boolean assertForCommit, int 
expRecordsInThisCommit, int expTotalRecords, int expTotalCommits, boolean 
doCommit,
-                                  boolean filterForCommitTimeWithAssert, 
InstantGenerator instantGenerator) throws Exception {
+                                  boolean assertForCommit, int 
expRecordsInThisCommit, int expTotalRecords, int expTotalCommits,
+                                  boolean filterForCommitTimeWithAssert, 
InstantGenerator instantGenerator, boolean leaveInflightCommit) throws 
Exception {

Review Comment:
   Similarly, rename leaveInflightCommit to skipCommit.



##########
hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java:
##########
@@ -426,7 +426,7 @@ public static FileSystemViewManager 
buildFileSystemViewManager(Config config, St
   }
 
   public void close() {
-    LOG.info("Closing Timeline Service");
+    LOG.info("Closing Timeline Service with port " + serverPort);

Review Comment:
   Can you put out a separate MINOR patch for these logging changes?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to