This is an automated email from the ASF dual-hosted git repository. vhs pushed a commit to branch release-1.0.2 in repository https://gitbox.apache.org/repos/asf/hudi.git
commit eaba7bfcd2042ebce70d8dbb0022a9835fcfc119 Author: Danny Chan <[email protected]> AuthorDate: Mon Apr 21 16:32:46 2025 +0800 [HUDI-9327] Remove the unnecessary HoodieMetadataConfig build (#13182) * [HUDI-9327] Remove the unnecessary HoodieMetadataConfig build * fix EI test --------- Co-authored-by: Sagar Sumit <[email protected]> (cherry picked from commit 7945ba968df1bcdcb6ac5d1b72e3d7bc3a20e4bc) --- .../apache/hudi/metadata/HoodieBackedTableMetadataWriter.java | 7 +++---- .../HoodieBackedTableMetadataWriterTableVersionSix.java | 5 ++--- .../java/org/apache/hudi/metadata/MetadataPartitionType.java | 8 ++------ .../org/apache/hudi/metadata/TestMetadataPartitionType.java | 8 ++++---- .../spark/sql/hudi/feature/index/TestExpressionIndex.scala | 11 +++++------ 5 files changed, 16 insertions(+), 23 deletions(-) diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java index c9c0c637cf6..76bd944eff2 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java @@ -26,7 +26,6 @@ import org.apache.hudi.avro.model.HoodieRestorePlan; import org.apache.hudi.avro.model.HoodieRollbackMetadata; import org.apache.hudi.client.BaseHoodieWriteClient; import org.apache.hudi.common.config.HoodieMetadataConfig; -import org.apache.hudi.common.config.TypedProperties; import org.apache.hudi.common.data.HoodieData; import org.apache.hudi.common.engine.EngineType; import org.apache.hudi.common.engine.HoodieEngineContext; @@ -181,7 +180,7 @@ public abstract class HoodieBackedTableMetadataWriter<I> implements HoodieTableM this.dataMetaClient = HoodieTableMetaClient.builder().setConf(storageConf.newInstance()) .setBasePath(dataWriteConfig.getBasePath()) 
.setTimeGeneratorConfig(dataWriteConfig.getTimeGeneratorConfig()).build(); - this.enabledPartitionTypes = getEnabledPartitions(dataWriteConfig.getProps(), dataMetaClient); + this.enabledPartitionTypes = getEnabledPartitions(dataWriteConfig.getMetadataConfig(), dataMetaClient); if (writeConfig.isMetadataTableEnabled()) { this.metadataWriteConfig = createMetadataWriteConfig(writeConfig, failedWritesCleaningPolicy); try { @@ -194,8 +193,8 @@ public abstract class HoodieBackedTableMetadataWriter<I> implements HoodieTableM ValidationUtils.checkArgument(!initialized || this.metadata != null, "MDT Reader should have been opened post initialization"); } - List<MetadataPartitionType> getEnabledPartitions(TypedProperties writeConfigProps, HoodieTableMetaClient metaClient) { - return MetadataPartitionType.getEnabledPartitions(writeConfigProps, metaClient); + List<MetadataPartitionType> getEnabledPartitions(HoodieMetadataConfig metadataConfig, HoodieTableMetaClient metaClient) { + return MetadataPartitionType.getEnabledPartitions(metadataConfig, metaClient); } abstract HoodieTable getTable(HoodieWriteConfig writeConfig, HoodieTableMetaClient metaClient); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriterTableVersionSix.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriterTableVersionSix.java index bab6b7515fc..57e0e93290b 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriterTableVersionSix.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriterTableVersionSix.java @@ -21,7 +21,6 @@ package org.apache.hudi.metadata; import org.apache.hudi.avro.model.HoodieRollbackMetadata; import org.apache.hudi.client.BaseHoodieWriteClient; import org.apache.hudi.common.config.HoodieMetadataConfig; -import org.apache.hudi.common.config.TypedProperties; import 
org.apache.hudi.common.engine.HoodieEngineContext; import org.apache.hudi.common.model.HoodieFailedWritesCleaningPolicy; import org.apache.hudi.common.table.HoodieTableMetaClient; @@ -78,8 +77,8 @@ public abstract class HoodieBackedTableMetadataWriterTableVersionSix<I> extends } @Override - List<MetadataPartitionType> getEnabledPartitions(TypedProperties writeConfigProps, HoodieTableMetaClient metaClient) { - return MetadataPartitionType.getEnabledPartitions(writeConfigProps, metaClient).stream() + List<MetadataPartitionType> getEnabledPartitions(HoodieMetadataConfig metadataConfig, HoodieTableMetaClient metaClient) { + return MetadataPartitionType.getEnabledPartitions(metadataConfig, metaClient).stream() .filter(partition -> !partition.equals(MetadataPartitionType.SECONDARY_INDEX)) .filter(partition -> !partition.equals(MetadataPartitionType.EXPRESSION_INDEX)) .filter(partition -> !partition.equals(MetadataPartitionType.PARTITION_STATS)) diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java b/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java index 6b1364a40fe..6d35f63621e 100644 --- a/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java +++ b/hudi-common/src/main/java/org/apache/hudi/metadata/MetadataPartitionType.java @@ -24,7 +24,6 @@ import org.apache.hudi.avro.model.HoodieMetadataFileInfo; import org.apache.hudi.avro.model.HoodieRecordIndexInfo; import org.apache.hudi.avro.model.HoodieSecondaryIndexInfo; import org.apache.hudi.common.config.HoodieMetadataConfig; -import org.apache.hudi.common.config.TypedProperties; import org.apache.hudi.common.model.HoodieIndexDefinition; import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.util.StringUtils; @@ -45,8 +44,6 @@ import java.util.stream.Collectors; import static org.apache.hudi.avro.HoodieAvroUtils.unwrapAvroValueWrapper; import static 
org.apache.hudi.avro.HoodieAvroUtils.wrapValueIntoAvro; -import static org.apache.hudi.common.config.HoodieMetadataConfig.ENABLE; -import static org.apache.hudi.common.util.ConfigUtils.getBooleanWithAltKeys; import static org.apache.hudi.common.util.TypeUtils.unsafeCast; import static org.apache.hudi.common.util.ValidationUtils.checkArgument; import static org.apache.hudi.common.util.ValidationUtils.checkState; @@ -451,11 +448,10 @@ public enum MetadataPartitionType { /** * Returns the list of metadata partition types enabled based on the metadata config and table config. */ - public static List<MetadataPartitionType> getEnabledPartitions(TypedProperties writeConfig, HoodieTableMetaClient metaClient) { - if (!getBooleanWithAltKeys(writeConfig, ENABLE)) { + public static List<MetadataPartitionType> getEnabledPartitions(HoodieMetadataConfig dataMetadataConfig, HoodieTableMetaClient metaClient) { + if (!dataMetadataConfig.isEnabled()) { return Collections.emptyList(); } - HoodieMetadataConfig dataMetadataConfig = HoodieMetadataConfig.newBuilder().fromProperties(writeConfig).build(); return Arrays.stream(getValidValues()) .filter(partitionType -> partitionType.isMetadataPartitionEnabled(dataMetadataConfig) || partitionType.isMetadataPartitionAvailable(metaClient)) .collect(Collectors.toList()); diff --git a/hudi-common/src/test/java/org/apache/hudi/metadata/TestMetadataPartitionType.java b/hudi-common/src/test/java/org/apache/hudi/metadata/TestMetadataPartitionType.java index 186ed1896ca..d27ec28b92e 100644 --- a/hudi-common/src/test/java/org/apache/hudi/metadata/TestMetadataPartitionType.java +++ b/hudi-common/src/test/java/org/apache/hudi/metadata/TestMetadataPartitionType.java @@ -84,7 +84,7 @@ public class TestMetadataPartitionType { break; } - List<MetadataPartitionType> enabledPartitions = MetadataPartitionType.getEnabledPartitions(metadataConfigBuilder.build().getProps(), metaClient); + List<MetadataPartitionType> enabledPartitions = 
MetadataPartitionType.getEnabledPartitions(metadataConfigBuilder.build(), metaClient); // Verify partition type is enabled due to config assertEquals(expectedEnabledPartitions, enabledPartitions.size()); @@ -103,7 +103,7 @@ public class TestMetadataPartitionType { Mockito.when(metaClient.getTableConfig().isMetadataPartitionAvailable(MetadataPartitionType.RECORD_INDEX)).thenReturn(true); HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder().enable(true).withEnableRecordIndex(false).build(); - List<MetadataPartitionType> enabledPartitions = MetadataPartitionType.getEnabledPartitions(metadataConfig.getProps(), metaClient); + List<MetadataPartitionType> enabledPartitions = MetadataPartitionType.getEnabledPartitions(metadataConfig, metaClient); // Verify RECORD_INDEX and FILES is enabled due to availability, and COLUMN_STATS and PARTITION_STATS by default assertEquals(4, enabledPartitions.size(), "RECORD_INDEX, FILES, COL_STATS, PARTITION_STATS should be available"); @@ -124,7 +124,7 @@ public class TestMetadataPartitionType { Mockito.when(metaClient.getTableConfig().isMetadataPartitionAvailable(Mockito.any())).thenReturn(false); HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder().enable(false).build(); - List<MetadataPartitionType> enabledPartitions = MetadataPartitionType.getEnabledPartitions(metadataConfig.getProps(), metaClient); + List<MetadataPartitionType> enabledPartitions = MetadataPartitionType.getEnabledPartitions(metadataConfig, metaClient); // Verify no partitions are enabled assertTrue(enabledPartitions.isEmpty(), "No partitions should be enabled"); @@ -144,7 +144,7 @@ public class TestMetadataPartitionType { Mockito.when(metaClient.getTableConfig().isMetadataPartitionAvailable(MetadataPartitionType.EXPRESSION_INDEX)).thenReturn(true); HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder().enable(true).build(); - List<MetadataPartitionType> enabledPartitions = 
MetadataPartitionType.getEnabledPartitions(metadataConfig.getProps(), metaClient); + List<MetadataPartitionType> enabledPartitions = MetadataPartitionType.getEnabledPartitions(metadataConfig, metaClient); // Verify EXPRESSION_INDEX and FILES is enabled due to availability, and COLUMN_STATS and PARTITION_STATS by default assertEquals(4, enabledPartitions.size(), "EXPRESSION_INDEX, FILES, COL_STATS and SECONDARY_INDEX should be available"); diff --git a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/feature/index/TestExpressionIndex.scala b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/feature/index/TestExpressionIndex.scala index e76eb8bf680..965727351ae 100644 --- a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/feature/index/TestExpressionIndex.scala +++ b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/feature/index/TestExpressionIndex.scala @@ -1973,10 +1973,10 @@ class TestExpressionIndex extends HoodieSparkSqlTestBase { if (shouldRollback) { // rollback the operation val lastCompletedInstant = metaClient.reloadActiveTimeline().getCommitsTimeline.filterCompletedInstants().lastInstant() - val writeConfig = getWriteConfig(Map.empty, metaClient.getBasePath.toString) - writeConfig.setValue("hoodie.metadata.index.column.stats.enable", "false") - writeConfig.setValue("hoodie.metadata.index.partition.stats.enable", "false") - val writeClient = new SparkRDDWriteClient(new HoodieSparkEngineContext(new JavaSparkContext(spark.sparkContext)), writeConfig) + val configBuilder = getWriteConfigBuilder(Map.empty, metaClient.getBasePath.toString) + configBuilder.withMetadataConfig(HoodieMetadataConfig.newBuilder() + .withMetadataIndexColumnStats(false).withMetadataIndexPartitionStats(false).build()) + val writeClient = new SparkRDDWriteClient(new HoodieSparkEngineContext(new JavaSparkContext(spark.sparkContext)), configBuilder.build()) 
writeClient.rollback(lastCompletedInstant.get().requestedTime) // validate the expression index checkAnswer(metadataSql)( @@ -2298,11 +2298,10 @@ class TestExpressionIndex extends HoodieSparkSqlTestBase { metaClient.getActiveTimeline) } - private def getWriteConfig(hudiOpts: Map[String, String], basePath: String): HoodieWriteConfig = { + private def getWriteConfigBuilder(hudiOpts: Map[String, String], basePath: String): HoodieWriteConfig.Builder = { val props = TypedProperties.fromMap(JavaConverters.mapAsJavaMapConverter(hudiOpts).asJava) HoodieWriteConfig.newBuilder() .withProps(props) .withPath(basePath) - .build() } }
