This is an automated email from the ASF dual-hosted git repository. vinoth pushed a commit to branch rfc-15 in repository https://gitbox.apache.org/repos/asf/hudi.git
commit b16586d503c34b79d80f5b806b401de189f6db3f Author: Prashant Wason <[email protected]> AuthorDate: Tue Oct 20 01:47:49 2020 -0700 [HUDI-1321] Created HoodieMetadataConfig to specify configuration for the metadata table. This is safer than full-fledged properties for the metadata table (like HoodieWriteConfig) as it makes it burdensome to tune the metadata. With limited configuration, we can control the performance of the metadata table closely. --- .../apache/hudi/config/HoodieMetadataConfig.java | 152 +++++++++++++++++++++ .../org/apache/hudi/config/HoodieWriteConfig.java | 79 ++++------- .../apache/hudi/metadata/HoodieMetadataWriter.java | 37 ++++- .../apache/hudi/metadata/TestHoodieMetadata.java | 31 +++-- 4 files changed, 228 insertions(+), 71 deletions(-) diff --git a/hudi-client/src/main/java/org/apache/hudi/config/HoodieMetadataConfig.java b/hudi-client/src/main/java/org/apache/hudi/config/HoodieMetadataConfig.java new file mode 100644 index 0000000..ca9c723 --- /dev/null +++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieMetadataConfig.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hudi.config; + +import org.apache.hudi.common.config.DefaultHoodieConfig; +import org.apache.hudi.config.HoodieCompactionConfig.Builder; + +import javax.annotation.concurrent.Immutable; + +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.util.Properties; + +/** + * Configurations used by the HUDI Metadata Table. + */ +@Immutable +public class HoodieMetadataConfig extends DefaultHoodieConfig { + + public static final String METADATA_PREFIX = "hoodie.metadata"; + + // Enable the internal Metadata Table which saves file listings + public static final String METADATA_ENABLE = METADATA_PREFIX + ".enable"; + public static final boolean DEFAULT_METADATA_ENABLE = false; + + // Validate contents of Metadata Table on each access against the actual filesystem + public static final String METADATA_VALIDATE = METADATA_PREFIX + ".validate"; + public static final boolean DEFAULT_METADATA_VALIDATE = false; + + // Parallelism for inserts + public static final String INSERT_PARALLELISM = METADATA_PREFIX + ".insert.parallelism"; + public static final int DEFAULT_INSERT_PARALLELISM = 1; + + // Async clean + public static final String ASYNC_CLEAN = METADATA_PREFIX + ".clean.async"; + public static final boolean DEFAULT_ASYNC_CLEAN = false; + + // Maximum delta commits before compaction occurs + public static final String COMPACT_NUM_DELTA_COMMITS = METADATA_PREFIX + ".compact.max.delta.commits"; + public static final int DEFAULT_COMPACT_NUM_DELTA_COMMITS = 24; + + // Archival settings + public static final String MIN_COMMITS_TO_KEEP = METADATA_PREFIX + ".keep.min.commits"; + public static final int DEFAULT_MIN_COMMITS_TO_KEEP = 20; + public static final String MAX_COMMITS_TO_KEEP = METADATA_PREFIX + ".keep.max.commits"; + public static final int DEFAULT_MAX_COMMITS_TO_KEEP = 30; + + // Cleaner commits retained + public static final String CLEANER_COMMITS_RETAINED = METADATA_PREFIX + ".cleaner.commits.retained"; + 
public static final int DEFAULT_CLEANER_COMMITS_RETAINED = 3; + + private HoodieMetadataConfig(Properties props) { + super(props); + } + + public static HoodieMetadataConfig.Builder newBuilder() { + return new Builder(); + } + + public static class Builder { + + private final Properties props = new Properties(); + + public Builder fromFile(File propertiesFile) throws IOException { + try (FileReader reader = new FileReader(propertiesFile)) { + this.props.load(reader); + return this; + } + } + + public Builder fromProperties(Properties props) { + this.props.putAll(props); + return this; + } + + public Builder enable(boolean enable) { + props.setProperty(METADATA_ENABLE, String.valueOf(enable)); + return this; + } + + public Builder validate(boolean validate) { + props.setProperty(METADATA_VALIDATE, String.valueOf(validate)); + return this; + } + + public Builder withInsertParallelism(int parallelism) { + props.setProperty(INSERT_PARALLELISM, String.valueOf(parallelism)); + return this; + } + + public Builder withAsyncClean(boolean asyncClean) { + props.setProperty(ASYNC_CLEAN, String.valueOf(asyncClean)); + return this; + } + + public Builder withMaxNumDeltaCommitsBeforeCompaction(int maxNumDeltaCommitsBeforeCompaction) { + props.setProperty(COMPACT_NUM_DELTA_COMMITS, String.valueOf(maxNumDeltaCommitsBeforeCompaction)); + return this; + } + + public Builder archiveCommitsWith(int minToKeep, int maxToKeep) { + props.setProperty(MIN_COMMITS_TO_KEEP, String.valueOf(minToKeep)); + props.setProperty(MAX_COMMITS_TO_KEEP, String.valueOf(maxToKeep)); + return this; + } + + public Builder retainCommits(int commitsRetained) { + props.setProperty(CLEANER_COMMITS_RETAINED, String.valueOf(commitsRetained)); + return this; + } + + public HoodieMetadataConfig build() { + HoodieMetadataConfig config = new HoodieMetadataConfig(props); + setDefaultOnCondition(props, !props.containsKey(METADATA_ENABLE), METADATA_ENABLE, + String.valueOf(DEFAULT_METADATA_ENABLE)); + 
setDefaultOnCondition(props, !props.containsKey(METADATA_VALIDATE), METADATA_VALIDATE, + String.valueOf(DEFAULT_METADATA_VALIDATE)); + setDefaultOnCondition(props, !props.containsKey(INSERT_PARALLELISM), INSERT_PARALLELISM, + String.valueOf(DEFAULT_INSERT_PARALLELISM)); + setDefaultOnCondition(props, !props.containsKey(ASYNC_CLEAN), ASYNC_CLEAN, + String.valueOf(DEFAULT_ASYNC_CLEAN)); + setDefaultOnCondition(props, !props.containsKey(COMPACT_NUM_DELTA_COMMITS), + COMPACT_NUM_DELTA_COMMITS, String.valueOf(DEFAULT_COMPACT_NUM_DELTA_COMMITS)); + setDefaultOnCondition(props, !props.containsKey(CLEANER_COMMITS_RETAINED), CLEANER_COMMITS_RETAINED, + String.valueOf(DEFAULT_CLEANER_COMMITS_RETAINED)); + setDefaultOnCondition(props, !props.containsKey(MAX_COMMITS_TO_KEEP), MAX_COMMITS_TO_KEEP, + String.valueOf(DEFAULT_MAX_COMMITS_TO_KEEP)); + setDefaultOnCondition(props, !props.containsKey(MIN_COMMITS_TO_KEEP), MIN_COMMITS_TO_KEEP, + String.valueOf(DEFAULT_MIN_COMMITS_TO_KEEP)); + + return config; + } + } + +} diff --git a/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java b/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java index 82199ff..d8d732f 100644 --- a/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java +++ b/hudi-client/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java @@ -118,17 +118,6 @@ public class HoodieWriteConfig extends DefaultHoodieConfig { public static final String MAX_CONSISTENCY_CHECKS_PROP = "hoodie.consistency.check.max_checks"; public static int DEFAULT_MAX_CONSISTENCY_CHECKS = 7; - // Enable the internal Metadata Table which saves file listings - private static final String USE_FILE_LISTING_METADATA = "hoodie.metadata.file.listings.enable"; - private static final String DEFAULT_USE_FILE_LISTING_METADATA = "false"; - - // Validate contents of Metadata Table on each access against the actual filesystem - private static final String FILE_LISTING_METADATA_VERIFY = 
"hoodie.metadata.file.listings.verify"; - private static final String DEFAULT_FILE_LISTING_METADATA_VERIFY = "false"; - - // Serialized compaction config to be used for Metadata Table - public static final String HOODIE_METADATA_COMPACTION_CONFIG = "hoodie.metadata.compaction.config"; - /** * HUDI-858 : There are users who had been directly using RDD APIs and have relied on a behavior in 0.4.x to allow * multiple write operations (upsert/buk-insert/...) to be executed within a single commit. @@ -776,30 +765,35 @@ public class HoodieWriteConfig extends DefaultHoodieConfig { * File listing metadata configs. */ public boolean useFileListingMetadata() { - return Boolean.parseBoolean(props.getProperty(USE_FILE_LISTING_METADATA)); + return Boolean.parseBoolean(props.getProperty(HoodieMetadataConfig.METADATA_ENABLE)); } public boolean getFileListingMetadataVerify() { - return Boolean.parseBoolean(props.getProperty(FILE_LISTING_METADATA_VERIFY)); + return Boolean.parseBoolean(props.getProperty(HoodieMetadataConfig.METADATA_VALIDATE)); } - public HoodieCompactionConfig getMetadataCompactionConfig() throws IOException { - String serializedCompactionConfig = props.getProperty(HOODIE_METADATA_COMPACTION_CONFIG); - if (serializedCompactionConfig != null) { - StringReader reader = new StringReader(serializedCompactionConfig); - Properties loadedProps = new Properties(); - loadedProps.load(reader); - return HoodieCompactionConfig.newBuilder().fromProperties(loadedProps).build(); - } + public int getMetadataInsertParallelism() { + return Integer.parseInt(props.getProperty(HoodieMetadataConfig.INSERT_PARALLELISM)); + } + + public int getMetadataCompactDeltaCommitMax() { + return Integer.parseInt(props.getProperty(HoodieMetadataConfig.COMPACT_NUM_DELTA_COMMITS)); + } + + public boolean isMetadataAsyncClean() { + return Boolean.parseBoolean(props.getProperty(HoodieMetadataConfig.ASYNC_CLEAN)); + } + + public int getMetadataMaxCommitsToKeep() { + return 
Integer.parseInt(props.getProperty(HoodieMetadataConfig.MAX_COMMITS_TO_KEEP)); + } + + public int getMetadataMinCommitsToKeep() { + return Integer.parseInt(props.getProperty(HoodieMetadataConfig.MIN_COMMITS_TO_KEEP)); + } - // Default config for compacting metadata tables - return HoodieCompactionConfig.newBuilder() - .withAutoClean(true) - .withInlineCompaction(true) - .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS) - .archiveCommitsWith(24, 30) - .withMaxNumDeltaCommitsBeforeCompaction(24) - .build(); + public int getMetadataCleanerCommitsRetained() { + return Integer.parseInt(props.getProperty(HoodieMetadataConfig.CLEANER_COMMITS_RETAINED)); } public static class Builder { @@ -814,6 +808,7 @@ public class HoodieWriteConfig extends DefaultHoodieConfig { private boolean isViewConfigSet = false; private boolean isConsistencyGuardSet = false; private boolean isCallbackConfigSet = false; + private boolean isMetadataConfigSet = false; public Builder fromFile(File propertiesFile) throws IOException { try (FileReader reader = new FileReader(propertiesFile)) { @@ -931,12 +926,9 @@ public class HoodieWriteConfig extends DefaultHoodieConfig { return this; } - public Builder withMetadataCompactionConfig(HoodieCompactionConfig compactionConfig) throws IOException { - // Since the property names from HoodieCompactionConfig are already used in withCompactionConfig, - // metadata compaction config can only be saved serialized. 
- StringWriter writer = new StringWriter(); - compactionConfig.getProps().store(writer, "metadata compaction config"); - props.setProperty(HOODIE_METADATA_COMPACTION_CONFIG, writer.toString()); + public Builder withMetadataConfig(HoodieMetadataConfig metadataConfig) { + props.putAll(metadataConfig.getProps()); + isMetadataConfigSet = true; return this; } @@ -1026,16 +1018,6 @@ public class HoodieWriteConfig extends DefaultHoodieConfig { return this; } - public Builder withUseFileListingMetadata(boolean enable) { - props.setProperty(USE_FILE_LISTING_METADATA, String.valueOf(enable)); - return this; - } - - public Builder withFileListingMetadataVerify(boolean enable) { - props.setProperty(FILE_LISTING_METADATA_VERIFY, String.valueOf(enable)); - return this; - } - protected void setDefaults() { // Check for mandatory properties setDefaultOnCondition(props, !props.containsKey(INSERT_PARALLELISM), INSERT_PARALLELISM, DEFAULT_PARALLELISM); @@ -1081,11 +1063,6 @@ public class HoodieWriteConfig extends DefaultHoodieConfig { setDefaultOnCondition(props, !props.containsKey(BULKINSERT_SORT_MODE), BULKINSERT_SORT_MODE, DEFAULT_BULKINSERT_SORT_MODE); - setDefaultOnCondition(props, !props.containsKey(USE_FILE_LISTING_METADATA), USE_FILE_LISTING_METADATA, - DEFAULT_USE_FILE_LISTING_METADATA); - setDefaultOnCondition(props, !props.containsKey(FILE_LISTING_METADATA_VERIFY), FILE_LISTING_METADATA_VERIFY, - DEFAULT_FILE_LISTING_METADATA_VERIFY); - // Make sure the props is propagated setDefaultOnCondition(props, !isIndexConfigSet, HoodieIndexConfig.newBuilder().fromProperties(props).build()); setDefaultOnCondition(props, !isStorageConfigSet, HoodieStorageConfig.newBuilder().fromProperties(props).build()); @@ -1101,6 +1078,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig { ConsistencyGuardConfig.newBuilder().fromProperties(props).build()); setDefaultOnCondition(props, !isCallbackConfigSet, HoodieWriteCommitCallbackConfig.newBuilder().fromProperties(props).build()); + 
setDefaultOnCondition(props, !isMetadataConfigSet, + HoodieMetadataConfig.newBuilder().fromProperties(props).build()); setDefaultOnCondition(props, !props.containsKey(EXTERNAL_RECORD_AND_SCHEMA_TRANSFORMATION), EXTERNAL_RECORD_AND_SCHEMA_TRANSFORMATION, DEFAULT_EXTERNAL_RECORD_AND_SCHEMA_TRANSFORMATION); diff --git a/hudi-client/src/main/java/org/apache/hudi/metadata/HoodieMetadataWriter.java b/hudi-client/src/main/java/org/apache/hudi/metadata/HoodieMetadataWriter.java index 5491451..936d294 100644 --- a/hudi-client/src/main/java/org/apache/hudi/metadata/HoodieMetadataWriter.java +++ b/hudi-client/src/main/java/org/apache/hudi/metadata/HoodieMetadataWriter.java @@ -47,6 +47,7 @@ import org.apache.hudi.common.fs.ConsistencyGuardConfig; import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.metrics.Registry; import org.apache.hudi.common.model.HoodieBaseFile; +import org.apache.hudi.common.model.HoodieCleaningPolicy; import org.apache.hudi.common.model.HoodieCommitMetadata; import org.apache.hudi.common.model.HoodieFileFormat; import org.apache.hudi.common.model.HoodieLogFile; @@ -59,11 +60,15 @@ import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; import org.apache.hudi.common.table.timeline.HoodieInstant; import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.table.timeline.TimelineLayout; import org.apache.hudi.common.table.timeline.TimelineMetadataUtils; +import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion; import org.apache.hudi.common.table.view.TableFileSystemView.SliceView; import org.apache.hudi.common.util.CleanerUtils; import org.apache.hudi.common.util.Option; import org.apache.hudi.common.util.ValidationUtils; +import org.apache.hudi.config.HoodieCompactionConfig; +import org.apache.hudi.config.HoodieMetadataConfig; import org.apache.hudi.config.HoodieMetricsConfig; import 
org.apache.hudi.config.HoodieWriteConfig; import org.apache.hudi.exception.HoodieException; @@ -97,7 +102,15 @@ public class HoodieMetadataWriter extends HoodieMetadataReader implements Serial private static Map<String, HoodieMetadataWriter> instances = new HashMap<>(); public static HoodieMetadataWriter instance(Configuration conf, HoodieWriteConfig writeConfig) { - return instances.computeIfAbsent(writeConfig.getBasePath(), k -> { + String key = writeConfig.getBasePath(); + if (instances.containsKey(key)) { + if (instances.get(key).enabled() != writeConfig.useFileListingMetadata()) { + // Enabled state has changed. Remove so it is recreated. + instances.remove(key); + } + } + + return instances.computeIfAbsent(key, k -> { try { return new HoodieMetadataWriter(conf, writeConfig); } catch (IOException e) { @@ -141,17 +154,18 @@ public class HoodieMetadataWriter extends HoodieMetadataReader implements Serial * @param schemaStr Metadata Table schema */ private HoodieWriteConfig createMetadataWriteConfig(HoodieWriteConfig writeConfig) throws IOException { + int parallelism = writeConfig.getMetadataInsertParallelism(); + // Create the write config for the metadata table by borrowing options from the main write config. 
HoodieWriteConfig.Builder builder = HoodieWriteConfig.newBuilder() - .withTimelineLayoutVersion(writeConfig.getTimelineLayoutVersion()) + .withTimelineLayoutVersion(TimelineLayoutVersion.CURR_VERSION) .withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder() .withConsistencyCheckEnabled(writeConfig.getConsistencyGuardConfig().isConsistencyCheckEnabled()) .withInitialConsistencyCheckIntervalMs(writeConfig.getConsistencyGuardConfig().getInitialConsistencyCheckIntervalMs()) .withMaxConsistencyCheckIntervalMs(writeConfig.getConsistencyGuardConfig().getMaxConsistencyCheckIntervalMs()) .withMaxConsistencyChecks(writeConfig.getConsistencyGuardConfig().getMaxConsistencyChecks()) .build()) - .withUseFileListingMetadata(false) - .withFileListingMetadataVerify(false) + .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(false).build()) .withAutoCommit(true) .withAvroSchemaValidate(true) .withEmbeddedTimelineServerEnabled(false) @@ -159,8 +173,19 @@ public class HoodieMetadataWriter extends HoodieMetadataReader implements Serial .withPath(metadataBasePath) .withSchema(HoodieMetadataRecord.getClassSchema().toString()) .forTable(tableName) - .withParallelism(1, 1).withDeleteParallelism(1).withRollbackParallelism(1).withFinalizeWriteParallelism(1) - .withCompactionConfig(writeConfig.getMetadataCompactionConfig()); + .withCompactionConfig(HoodieCompactionConfig.newBuilder() + .withAsyncClean(writeConfig.isMetadataAsyncClean()) + .withAutoClean(true) + .withCleanerParallelism(parallelism) + .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS) + .retainCommits(writeConfig.getMetadataCleanerCommitsRetained()) + .archiveCommitsWith(writeConfig.getMetadataMinCommitsToKeep(), writeConfig.getMetadataMaxCommitsToKeep()) + .withInlineCompaction(true) + .withMaxNumDeltaCommitsBeforeCompaction(writeConfig.getMetadataCompactDeltaCommitMax()).build()) + .withParallelism(parallelism, parallelism) + .withDeleteParallelism(parallelism) + 
.withRollbackParallelism(parallelism) + .withFinalizeWriteParallelism(parallelism); if (writeConfig.isMetricsOn()) { HoodieMetricsConfig.Builder metricsConfig = HoodieMetricsConfig.newBuilder() diff --git a/hudi-client/src/test/java/org/apache/hudi/metadata/TestHoodieMetadata.java b/hudi-client/src/test/java/org/apache/hudi/metadata/TestHoodieMetadata.java index 751e2ab..0c5839b 100644 --- a/hudi-client/src/test/java/org/apache/hudi/metadata/TestHoodieMetadata.java +++ b/hudi-client/src/test/java/org/apache/hudi/metadata/TestHoodieMetadata.java @@ -56,6 +56,7 @@ import org.apache.hudi.common.testutils.HoodieTestUtils; import org.apache.hudi.common.util.Option; import org.apache.hudi.config.HoodieCompactionConfig; import org.apache.hudi.config.HoodieIndexConfig; +import org.apache.hudi.config.HoodieMetadataConfig; import org.apache.hudi.config.HoodieMetricsConfig; import org.apache.hudi.config.HoodieStorageConfig; import org.apache.hudi.config.HoodieWriteConfig; @@ -461,11 +462,9 @@ public class TestHoodieMetadata extends HoodieClientTestHarness { // Test autoClean and asyncClean based on this flag which is randomly chosen. 
boolean asyncClean = new Random().nextBoolean(); HoodieWriteConfig config = getWriteConfigBuilder(true, true, false) - .withMetadataCompactionConfig(HoodieCompactionConfig.newBuilder() - .archiveCommitsWith(2, 4).retainCommits(1).retainFileVersions(1).withAutoClean(true) - .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS) - .withInlineCompaction(true).withMaxNumDeltaCommitsBeforeCompaction(maxDeltaCommitsBeforeCompaction) - .build()) + .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true) + .archiveCommitsWith(2, 4).retainCommits(1) + .withMaxNumDeltaCommitsBeforeCompaction(maxDeltaCommitsBeforeCompaction).build()) .withCompactionConfig(HoodieCompactionConfig.newBuilder().archiveCommitsWith(2, 3) .retainCommits(1).retainFileVersions(1).withAutoClean(true).withAsyncClean(asyncClean).build()) .build(); @@ -594,8 +593,7 @@ public class TestHoodieMetadata extends HoodieClientTestHarness { public void testMetadataMetrics() throws Exception { init(); - try (HoodieWriteClient client = new HoodieWriteClient<>(jsc, - getWriteConfigBuilder(true, true, true).withFileListingMetadataVerify(true).build())) { + try (HoodieWriteClient client = new HoodieWriteClient<>(jsc, getWriteConfigBuilder(true, true, true).build())) { // Write String newCommitTime = HoodieActiveTimeline.createNewInstantTime(); List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, 20); @@ -606,8 +604,12 @@ public class TestHoodieMetadata extends HoodieClientTestHarness { Registry metricsRegistry = Registry.getRegistry("HoodieMetadata"); assertTrue(metricsRegistry.getAllCounts().containsKey(HoodieMetadataWriter.INITIALIZE_STR + ".count")); - assertTrue(metricsRegistry.getAllCounts().containsKey(HoodieMetadataWriter.INITIALIZE_STR + ".duration")); + assertTrue(metricsRegistry.getAllCounts().containsKey(HoodieMetadataWriter.INITIALIZE_STR + ".totalDuration")); assertEquals(metricsRegistry.getAllCounts().get(HoodieMetadataWriter.INITIALIZE_STR + ".count"), 1L); + 
assertTrue(metricsRegistry.getAllCounts().containsKey("basefile.size")); + assertTrue(metricsRegistry.getAllCounts().containsKey("logfile.size")); + assertTrue(metricsRegistry.getAllCounts().containsKey("basefile.count")); + assertTrue(metricsRegistry.getAllCounts().containsKey("logfile.count")); } } @@ -617,11 +619,14 @@ public class TestHoodieMetadata extends HoodieClientTestHarness { * @throws IOException */ private void validateMetadata(HoodieWriteClient client) throws IOException { - long t1 = System.currentTimeMillis(); - HoodieWriteConfig config = client.getConfig(); HoodieMetadataWriter metadata = metadata(client); assertFalse(metadata == null, "MetadataWriter should have been initialized"); + if (!config.useFileListingMetadata()) { + return; + } + + long t1 = System.currentTimeMillis(); // Validate write config for metadata table HoodieWriteConfig metadataWriteConfig = metadata.getWriteConfig(); @@ -742,10 +747,6 @@ public class TestHoodieMetadata extends HoodieClientTestHarness { } } - private HoodieWriteConfig getWriteConfig() { - return getWriteConfig(true, true); - } - private HoodieWriteConfig getWriteConfig(boolean autoCommit, boolean useFileListingMetadata) { return getWriteConfigBuilder(autoCommit, useFileListingMetadata, false).build(); } @@ -763,7 +764,7 @@ public class TestHoodieMetadata extends HoodieClientTestHarness { .withFileSystemViewConfig(new FileSystemViewStorageConfig.Builder() .withEnableBackupForRemoteFileSystemView(false).build()) .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.BLOOM).build()) - .withUseFileListingMetadata(useFileListingMetadata) + .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(useFileListingMetadata).build()) .withMetricsConfig(HoodieMetricsConfig.newBuilder().withReporterType("CONSOLE").on(enableMetrics) .withExecutorMetrics(true).usePrefix("unit-test").build()); }
