This is an automated email from the ASF dual-hosted git repository.
sivabalan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new 2eaf0fd [HUDI-2472] Enabling Metadata table for some of TestCleaner unit tests (#3803)
2eaf0fd is described below
commit 2eaf0fd939f680638e711f66cbdef1f7931b1ed1
Author: Manoj Govindassamy <[email protected]>
AuthorDate: Tue Nov 2 03:54:36 2021 -0700
[HUDI-2472] Enabling Metadata table for some of TestCleaner unit tests (#3803)
- Make use of HoodieTableMetadataWriter when constructing the HoodieMetadataTestTable instance so that these tests exercise the metadata table.
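For context, a minimal sketch of the pattern this change adopts (imports omitted; metaClient, hadoopConf, context and basePath are the usual HoodieClientTestBase fields, and the class and method names are the ones referenced in the diff below):

    // Enable the metadata table in the write config under test.
    HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
        .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build())
        .build();

    // Hand a metadata writer to the test table so that test writes also update
    // the metadata table, instead of constructing HoodieTestTable.of(metaClient).
    HoodieTableMetadataWriter metadataWriter =
        SparkHoodieBackedTableMetadataWriter.create(hadoopConf, config, context);
    HoodieTestTable testTable = HoodieMetadataTestTable.of(metaClient, metadataWriter);
    testTable.doWriteOperation("001", WriteOperationType.INSERT, Collections.emptyList(), 1);

With the writer wired in, the cleaner tests run against a table whose writes also populate the metadata table.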
---
.../java/org/apache/hudi/table/TestCleaner.java | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
index 72f6a07..cb468e9 100644
--- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
+++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
@@ -269,7 +269,7 @@ public class TestCleaner extends HoodieClientTestBase {
.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(maxVersions).build())
.withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
.withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
- .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(false).build())
+ .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build())
.build();
try (SparkRDDWriteClient client = getHoodieWriteClient(cfg);) {
@@ -439,7 +439,7 @@ public class TestCleaner extends HoodieClientTestBase {
.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(maxCommits).build())
.withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
.withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
- .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(false).build())
+ .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build())
.build();
SparkRDDWriteClient client = getHoodieWriteClient(cfg);
@@ -516,7 +516,7 @@ public class TestCleaner extends HoodieClientTestBase {
.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(maxCommits).build())
.withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
.withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
- .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(false).build())
+ .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build())
.build();
SparkRDDWriteClient client = getHoodieWriteClient(cfg);
@@ -917,7 +917,7 @@ public class TestCleaner extends HoodieClientTestBase {
String file4P1C4 = partitionAndFileId005.get(p1);
replaceMetadata = generateReplaceCommitMetadata(p0, file3P1C2, file4P1C4);
testTable.addReplaceCommit("00000000000005", Option.of(replaceMetadata.getKey()), Option.empty(), replaceMetadata.getValue());
-
+
List<HoodieCleanStat> hoodieCleanStatsFive = runCleaner(config, 2);
assertTrue(testTable.baseFileExists(p0, "00000000000004", file4P0C3));
assertTrue(testTable.baseFileExists(p0, "00000000000002", file2P0C1));
@@ -925,7 +925,7 @@ public class TestCleaner extends HoodieClientTestBase {
assertFalse(testTable.baseFileExists(p0, "00000000000001", file1P0C0));
assertFalse(testTable.baseFileExists(p1, "00000000000001", file1P1C0));
}
-
+
private Pair<HoodieRequestedReplaceMetadata, HoodieReplaceCommitMetadata> generateReplaceCommitMetadata(String partition,
String replacedFileId,
String newFileId) {
@@ -942,7 +942,7 @@ public class TestCleaner extends HoodieClientTestBase {
.setVersion(1).setExtraMetadata(Collections.emptyMap())
.setStrategy(HoodieClusteringStrategy.newBuilder().setStrategyClassName("").setVersion(1).build())
.setInputGroups(clusteringGroups).build());
-
+
HoodieReplaceCommitMetadata replaceMetadata = new HoodieReplaceCommitMetadata();
replaceMetadata.addReplaceFileId(partition, replacedFileId);
replaceMetadata.setOperationType(WriteOperationType.CLUSTER);
@@ -1319,7 +1319,7 @@ public class TestCleaner extends HoodieClientTestBase {
@Test
public void testCleaningWithZeroPartitionPaths() throws Exception {
HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
- .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).enable(false).build())
+ .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).enable(true).build())
.withCompactionConfig(HoodieCompactionConfig.newBuilder()
.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2).build())
.build();
@@ -1327,7 +1327,9 @@ public class TestCleaner extends HoodieClientTestBase {
// Make a commit, although there are no partitionPaths.
// Example use-case of this is when a client wants to create a table
// with just some commit metadata, but no data/partitionPaths.
- HoodieTestTable.of(metaClient).addCommit("000");
+ HoodieTableMetadataWriter metadataWriter = SparkHoodieBackedTableMetadataWriter.create(hadoopConf, config, context);
+ HoodieTestTable testTable = HoodieMetadataTestTable.of(metaClient, metadataWriter);
+ testTable.doWriteOperation("001", WriteOperationType.INSERT, Collections.emptyList(), 1);
metaClient = HoodieTableMetaClient.reload(metaClient);
@@ -1341,7 +1343,7 @@ public class TestCleaner extends HoodieClientTestBase {
@Test
public void testKeepLatestCommitsWithPendingCompactions() throws Exception {
HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
- .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).enable(false).build())
+ .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).enable(true).build())
.withCompactionConfig(HoodieCompactionConfig.newBuilder()
.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2).build())
.build();
@@ -1365,7 +1367,7 @@ public class TestCleaner extends HoodieClientTestBase {
public void testKeepLatestVersionsWithPendingCompactions(boolean retryFailure) throws Exception {
HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
- .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
+ .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).enable(true).build())
.withCompactionConfig(HoodieCompactionConfig.newBuilder()
.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(2).build())
.build();