xushiyan commented on a change in pull request #1818:
URL: https://github.com/apache/hudi/pull/1818#discussion_r453154177



##########
File path: 
hudi-client/src/test/java/org/apache/hudi/index/hbase/TestHBaseIndex.java
##########
@@ -313,53 +303,17 @@ public void testTotalPutsBatching() throws Exception {
     // Get all the files generated
     int numberOfDataFileIds = (int) writeStatues.map(status -> 
status.getFileId()).distinct().count();
 
-    index.updateLocation(writeStatues, jsc, hoodieTable);
+    index.updateLocation(writeStatues, jsc(), hoodieTable);
     // 3 batches should be executed given batchSize = 100 and 
<=numberOfDataFileIds getting updated,
     // so each fileId ideally gets updates
     verify(table, atMost(numberOfDataFileIds)).put((List<Put>) any());
   }
 
-  @Test
-  public void testPutBatchSizeCalculation() {

Review comment:
       This test has been moved to a separate unit test class.

##########
File path: 
hudi-client/src/test/java/org/apache/hudi/index/hbase/TestHBaseIndex.java
##########
@@ -460,47 +404,34 @@ public void testDelete() throws Exception {
       // Now tagLocation for these records, hbaseIndex should tag them 
correctly
       metaClient = HoodieTableMetaClient.reload(metaClient);
       hoodieTable = HoodieTable.create(metaClient, config, hadoopConf);
-      javaRDD = index.tagLocation(writeRecords, jsc, hoodieTable);
-      assertEquals(10, javaRDD.filter(record -> 
record.isCurrentLocationKnown()).collect().size());
-      assertEquals(10, javaRDD.map(record -> 
record.getKey().getRecordKey()).distinct().count());
-      assertEquals(10, javaRDD.filter(record -> (record.getCurrentLocation() 
!= null
+      List<HoodieRecord> records2 = index.tagLocation(writeRecords, jsc(), 
hoodieTable).collect();
+      assertEquals(numRecords, records2.stream().filter(record -> 
record.isCurrentLocationKnown()).count());
+      assertEquals(numRecords, records2.stream().map(record -> 
record.getKey().getRecordKey()).distinct().count());
+      assertEquals(numRecords, records2.stream().filter(record -> 
(record.getCurrentLocation() != null
           && 
record.getCurrentLocation().getInstantTime().equals(newCommitTime))).distinct().count());
 
       // Delete all records. This has to be done directly as deleting index 
entries
       // is not implemented via HoodieWriteClient
-      Option recordMetadata = Option.empty();
       JavaRDD<WriteStatus> deleteWriteStatues = writeStatues.map(w -> {
         WriteStatus newWriteStatus = new WriteStatus(true, 1.0);
-        w.getWrittenRecords().forEach(r -> newWriteStatus.markSuccess(new 
HoodieRecord(r.getKey(), null), recordMetadata));
+        w.getWrittenRecords().forEach(r -> newWriteStatus.markSuccess(new 
HoodieRecord(r.getKey(), null), Option.empty()));
         assertEquals(w.getTotalRecords(), newWriteStatus.getTotalRecords());
         newWriteStatus.setStat(new HoodieWriteStat());
         return newWriteStatus;
       });
-      JavaRDD<WriteStatus> deleteStatus = 
index.updateLocation(deleteWriteStatues, jsc, hoodieTable);
+      JavaRDD<WriteStatus> deleteStatus = 
index.updateLocation(deleteWriteStatues, jsc(), hoodieTable);
       assertEquals(deleteStatus.count(), deleteWriteStatues.count());
       assertNoWriteErrors(deleteStatus.collect());
 
       // Ensure no records can be tagged
-      javaRDD = index.tagLocation(writeRecords, jsc, hoodieTable);
-      assertEquals(0, javaRDD.filter(record -> 
record.isCurrentLocationKnown()).collect().size());
-      assertEquals(10, javaRDD.map(record -> 
record.getKey().getRecordKey()).distinct().count());
-      assertEquals(0, javaRDD.filter(record -> (record.getCurrentLocation() != 
null
+      List<HoodieRecord> records3 = index.tagLocation(writeRecords, jsc(), 
hoodieTable).collect();
+      assertEquals(0, records3.stream().filter(record -> 
record.isCurrentLocationKnown()).count());
+      assertEquals(numRecords, records3.stream().map(record -> 
record.getKey().getRecordKey()).distinct().count());
+      assertEquals(0, records3.stream().filter(record -> 
(record.getCurrentLocation() != null
           && 
record.getCurrentLocation().getInstantTime().equals(newCommitTime))).distinct().count());
     }
   }
 
-  @Test
-  public void testFeatureSupport() {

Review comment:
       This test has been moved to a separate unit test class.
   
   

##########
File path: 
hudi-client/src/test/java/org/apache/hudi/index/hbase/TestHBaseIndex.java
##########
@@ -383,22 +337,12 @@ public void testsHBasePutAccessParallelismWithNoInserts() 
{
     assertEquals(0, hbaseNumPuts);
   }
 
-  @Test
-  public void testsHBaseIndexDefaultQPSResourceAllocator() {

Review comment:
       This is a duplicate of the test case in 
org.apache.hudi.index.hbase.TestHBaseQPSResourceAllocator.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to