lokeshj1703 commented on code in PR #13436:
URL: https://github.com/apache/hudi/pull/13436#discussion_r2149880848
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/io/TestAppendHandle.java:
##########
@@ -85,12 +88,69 @@ public void testAppendHandleRLIStats(boolean populateMetaFields) {
     assertEquals(0, writeStatus.getTotalErrorRecords());
     // validate write status has all record delegates
     if (populateMetaFields) {
-      assertEquals(records.size(), writeStatus.getWrittenRecordDelegates().size());
-      for (HoodieRecordDelegate recordDelegate : writeStatus.getWrittenRecordDelegates()) {
+      assertEquals(records.size(), writeStatus.getIndexStats().getWrittenRecordDelegates().size());
+      for (HoodieRecordDelegate recordDelegate : writeStatus.getIndexStats().getWrittenRecordDelegates()) {
         assertTrue(recordDelegate.getNewLocation().isPresent());
         assertEquals(fileId, recordDelegate.getNewLocation().get().getFileId());
         assertEquals(instantTime, recordDelegate.getNewLocation().get().getInstantTime());
       }
     }
   }
+
+  @ParameterizedTest
+  @ValueSource(booleans = { true, false })
+  public void testAppendHandleSecondaryIndexStats(boolean populateMetaFields) throws Exception {
+    // init config and table
+    HoodieWriteConfig config = getConfigBuilder(basePath)
+        .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder().withRemoteServerPort(timelineServicePort).build())
+        .withPopulateMetaFields(populateMetaFields)
+        .withMetadataConfig(HoodieMetadataConfig.newBuilder()
+            .enable(true)
+            .withEnableRecordIndex(true)
+            .withStreamingWriteEnabled(true)
+            .withSecondaryIndexEnabled(true)
+            .withSecondaryIndexName("sec-rider")
+            .withSecondaryIndexForColumn("rider")
+            .build())
+        .build();
+
+    HoodieTable table = HoodieSparkTable.create(config, context, metaClient);
+    HoodieTableMetadataWriter metadataWriter = SparkMetadataWriterFactory.create(storageConf, config, context, table.getMetaClient().getTableConfig());
+    metadataWriter.close();
+
+    // one round per partition
+    String partitionPath = HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[0];
+    // init some args
+    String fileId = UUID.randomUUID().toString();
+    String instantTime = "000";
+
+    config.setSchema(TRIP_EXAMPLE_SCHEMA);
+    HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator(new String[] {partitionPath});
+    // create parquet file
+    createParquetFile(config, table, partitionPath, fileId, instantTime, dataGenerator, true);
+    // generate update records
+    instantTime = "001";
+    List<HoodieRecord> records = dataGenerator.generateUniqueUpdates(instantTime, 50);
+    metaClient = HoodieTableMetaClient.reload(metaClient);
+    table = HoodieSparkTable.create(config, context, metaClient);
+    HoodieAppendHandle handle = new HoodieAppendHandle(config, instantTime, table, partitionPath, fileId, records.iterator(), new LocalTaskContextSupplier());
+    Map<String, HoodieRecord> recordMap = new HashMap<>();
+    for (int i = 0; i < records.size(); i++) {
+      recordMap.put(String.valueOf(i), records.get(i));
+    }
+    // write the update records
+    handle.write(recordMap);
+    WriteStatus writeStatus = handle.writeStatus;
+    handle.close();
+
+    assertEquals(records.size(), writeStatus.getTotalRecords());
+    assertEquals(0, writeStatus.getTotalErrorRecords());
+    // validate write status has all record delegates
+    if (populateMetaFields) {
+      assertEquals(1, writeStatus.getIndexStats().getSecondaryIndexStats().size());
+      // Since the MDT is not populated during the create, the updates would be considered as new records by the append handle.
+      // Therefore only secondary index records for the 50 updates would appear here.
+      assertEquals(50, writeStatus.getIndexStats().getSecondaryIndexStats().values().stream().findFirst().get().size());
Review Comment:
Addressed
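
To make the counting rationale in the comment concrete: with no record-index entries in the MDT at write time, all 50 updates surface as fresh secondary-key mappings, so none of the stat entries should be tombstones. A minimal sketch of that extra check, meant to run inside the test body above (JUnit asserts and `java.util` imports in scope); `SecondaryIndexStats` and `isDeleted()` are assumed names, not taken from this PR:

```java
// Hedged sketch: all 50 entries should be new-key mappings, no deletes.
// SecondaryIndexStats and isDeleted() are assumed names; adjust to the PR's API.
List<SecondaryIndexStats> siStats = writeStatus.getIndexStats()
    .getSecondaryIndexStats().values().stream().findFirst().get();
assertTrue(siStats.stream().noneMatch(SecondaryIndexStats::isDeleted));
```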
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/io/TestCreateHandle.java:
##########
@@ -65,20 +68,65 @@ public void testCreateHandleRLIStats(boolean populateMetaFields) {
     config.setSchema(TRIP_EXAMPLE_SCHEMA);
     HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator(new String[] {partitionPath});
-    Pair<WriteStatus, List<HoodieRecord>> statusListPair = createParquetFile(config, table, partitionPath, fileId, instantTime, dataGenerator);
+    Pair<WriteStatus, List<HoodieRecord>> statusListPair = createParquetFile(config, table, partitionPath, fileId, instantTime, dataGenerator, true);
     WriteStatus writeStatus = statusListPair.getLeft();
     List<HoodieRecord> records = statusListPair.getRight();
     assertEquals(records.size(), writeStatus.getTotalRecords());
     assertEquals(0, writeStatus.getTotalErrorRecords());
     // validate write status has all record delegates
     if (populateMetaFields) {
-      assertEquals(records.size(), writeStatus.getWrittenRecordDelegates().size());
-      for (HoodieRecordDelegate recordDelegate : writeStatus.getWrittenRecordDelegates()) {
+      assertEquals(records.size(), writeStatus.getIndexStats().getWrittenRecordDelegates().size());
+      for (HoodieRecordDelegate recordDelegate : writeStatus.getIndexStats().getWrittenRecordDelegates()) {
         assertTrue(recordDelegate.getNewLocation().isPresent());
         assertEquals(fileId, recordDelegate.getNewLocation().get().getFileId());
         assertEquals(instantTime, recordDelegate.getNewLocation().get().getInstantTime());
       }
     }
   }
+
+  @ParameterizedTest
+  @ValueSource(booleans = { true, false })
+  public void testCreateHandleSecondaryIndexStats(boolean populateMetaFields) throws Exception {
+    // init config and table
+    HoodieWriteConfig config = getConfigBuilder(basePath)
+        .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder().withRemoteServerPort(timelineServicePort).build())
+        .withPopulateMetaFields(populateMetaFields)
+        .withMetadataConfig(HoodieMetadataConfig.newBuilder()
+            .enable(true)
+            .withEnableRecordIndex(true)
+            .withStreamingWriteEnabled(true)
+            .withSecondaryIndexEnabled(true)
+            .withSecondaryIndexName("sec-rider")
+            .withSecondaryIndexForColumn("rider")
+            .build())
+        .build();
+
+    HoodieTable table = HoodieSparkTable.create(config, context, metaClient);
+    HoodieTableMetadataWriter metadataWriter = SparkMetadataWriterFactory.create(storageConf, config, context, table.getMetaClient().getTableConfig());
+    metadataWriter.close();
+
+    // one round per partition
+    String partitionPath = HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[0];
+
+    // init some args
+    String fileId = UUID.randomUUID().toString();
+    String instantTime = "000";
+
+    config.setSchema(TRIP_EXAMPLE_SCHEMA);
+    metaClient = HoodieTableMetaClient.reload(metaClient);
+    table = HoodieSparkTable.create(config, context, metaClient);
+    HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator(new String[] {partitionPath});
+    Pair<WriteStatus, List<HoodieRecord>> statusListPair = createParquetFile(config, table, partitionPath, fileId, instantTime, dataGenerator, true);
+    WriteStatus writeStatus = statusListPair.getLeft();
+    List<HoodieRecord> records = statusListPair.getRight();
+
+    assertEquals(records.size(), writeStatus.getTotalRecords());
+    assertEquals(0, writeStatus.getTotalErrorRecords());
+    // validate write status has all record delegates
+    if (populateMetaFields) {
+      assertEquals(1, writeStatus.getIndexStats().getSecondaryIndexStats().size());
+      assertEquals(100, writeStatus.getIndexStats().getSecondaryIndexStats().values().stream().findFirst().get().size());
Review Comment:
Addressed
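
Since the create path writes 100 fresh records, a natural follow-up assertion is one secondary-index entry per record key. A hedged sketch of that, meant to run inside the test body above; `SecondaryIndexStats` and `getRecordKey()` are assumed names, not confirmed against this PR:

```java
// Hedged sketch: expect exactly one secondary-index entry per inserted record.
// SecondaryIndexStats#getRecordKey() is an assumed accessor name.
List<SecondaryIndexStats> siStats = writeStatus.getIndexStats()
    .getSecondaryIndexStats().values().stream().findFirst().get();
Set<String> indexedKeys = siStats.stream()
    .map(SecondaryIndexStats::getRecordKey)
    .collect(Collectors.toSet());
assertEquals(records.size(), indexedKeys.size());
```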
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/io/TestMergeHandle.java:
##########
@@ -81,4 +85,57 @@ public void testMergeHandleRLIStats() throws IOException {
     assertEquals(records.size(), writeStatus.getStat().getNumWrites());
     assertEquals(10, writeStatus.getStat().getNumUpdateWrites());
   }
+
+  @Test
+  public void testMergeHandleSecondaryIndexStats() throws Exception {
+    // init config and table
+    HoodieWriteConfig config = getConfigBuilder(basePath)
+        .withPopulateMetaFields(false)
+        .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder().withRemoteServerPort(timelineServicePort).build())
+        .withMetadataConfig(HoodieMetadataConfig.newBuilder()
+            .enable(true)
+            .withEnableRecordIndex(true)
+            .withStreamingWriteEnabled(true)
+            .withSecondaryIndexEnabled(true)
+            .withSecondaryIndexName("sec-rider")
+            .withSecondaryIndexForColumn("rider")
+            .build())
+        .withKeyGenerator(KeyGeneratorForDataGeneratorRecords.class.getCanonicalName())
+        .build();
+    HoodieSparkCopyOnWriteTable table = (HoodieSparkCopyOnWriteTable) HoodieSparkTable.create(config, new HoodieLocalEngineContext(storageConf), metaClient);
+    HoodieTableMetadataWriter metadataWriter = SparkMetadataWriterFactory.create(storageConf, config, context, table.getMetaClient().getTableConfig());
+    metadataWriter.close();
+
+    // one round per partition
+    String partitionPath = HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[0];
+    // init some args
+    String fileId = UUID.randomUUID().toString();
+    String instantTime = "000";
+
+    // Create a parquet file
+    config.setSchema(TRIP_EXAMPLE_SCHEMA);
+    metaClient = HoodieTableMetaClient.reload(metaClient);
+    table = (HoodieSparkCopyOnWriteTable) HoodieSparkCopyOnWriteTable.create(config, context, metaClient);
+    HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator(new String[] {partitionPath});
+
+    Pair<WriteStatus, List<HoodieRecord>> statusListPair = createParquetFile(config, table, partitionPath, fileId, instantTime, dataGenerator, false);
+    WriteStatus writeStatus = statusListPair.getLeft();
+    List<HoodieRecord> records = statusListPair.getRight();
+    assertEquals(records.size(), writeStatus.getTotalRecords());
+    assertEquals(0, writeStatus.getTotalErrorRecords());
+
+    instantTime = "001";
+    List<HoodieRecord> updates = dataGenerator.generateUniqueUpdates(instantTime, 10);
+    HoodieMergeHandle mergeHandle = new HoodieMergeHandle(config, instantTime, table, updates.iterator(), partitionPath, fileId, new LocalTaskContextSupplier(),
+        new HoodieBaseFile(writeStatus.getStat().getPath()), Option.of(new KeyGeneratorForDataGeneratorRecords(config.getProps())));
+    HoodieMergeHelper.newInstance().runMerge(table, mergeHandle);
+    writeStatus = mergeHandle.writeStatus;
+    // verify stats after merge
+    assertEquals(records.size(), writeStatus.getStat().getNumWrites());
+    assertEquals(10, writeStatus.getStat().getNumUpdateWrites());
+    // verify secondary index stats
+    assertEquals(1, writeStatus.getIndexStats().getSecondaryIndexStats().size());
+    // 10 SI records for old secondary keys and 10 for new secondary keys
+    assertEquals(20, writeStatus.getIndexStats().getSecondaryIndexStats().values().stream().findFirst().get().size());
Review Comment:
Addressed
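
Per the comment in the hunk, the 20 entries break down as 10 tombstones for the old secondary keys plus 10 mappings for the new ones. A hedged sketch of asserting that split, meant to run inside the test body above; `SecondaryIndexStats` and `isDeleted()` are assumed names, not verified against this PR:

```java
// Hedged sketch: 10 deletes (old secondary keys) + 10 inserts (new keys).
// isDeleted() is an assumed accessor; rename to match the PR's stat class.
List<SecondaryIndexStats> siStats = writeStatus.getIndexStats()
    .getSecondaryIndexStats().values().stream().findFirst().get();
long deletes = siStats.stream().filter(SecondaryIndexStats::isDeleted).count();
assertEquals(10, deletes);
assertEquals(10, siStats.size() - deletes);
```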