nsivabalan commented on code in PR #12948:
URL: https://github.com/apache/hudi/pull/12948#discussion_r2001850161
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -193,6 +194,10 @@ protected HoodieBackedTableMetadataWriter(StorageConfiguration<?> storageConf,
     ValidationUtils.checkArgument(!initialized || this.metadata != null, "MDT Reader should have been opened post initialization");
   }
+  List<MetadataPartitionType> getEnabledPartitions(TypedProperties writeConfigProps, HoodieTableMetaClient metaClient) {
Review Comment:
again, let's try to ensure access specifiers are tight
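e.g., a sketch (private if only this class calls it; protected if the version-six subclass needs it):
```java
// Sketch: avoid the implicit package-private default here.
protected List<MetadataPartitionType> getEnabledPartitions(TypedProperties writeConfigProps,
                                                           HoodieTableMetaClient metaClient) {
  // ... body unchanged ...
}
```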
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metadata/SparkHoodieBackedTableMetadataWriterTableVersionSix.java:
##########
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.metadata;
+
+import org.apache.hudi.client.BaseHoodieWriteClient;
+import org.apache.hudi.client.SparkRDDWriteClient;
+import org.apache.hudi.client.common.HoodieSparkEngineContext;
+import org.apache.hudi.common.data.HoodieData;
+import org.apache.hudi.common.engine.EngineType;
+import org.apache.hudi.common.engine.HoodieEngineContext;
+import org.apache.hudi.common.metrics.Registry;
+import org.apache.hudi.common.model.HoodieFailedWritesCleaningPolicy;
+import org.apache.hudi.common.model.HoodieIndexDefinition;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieTableType;
+import org.apache.hudi.common.model.WriteOperationType;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.util.CommitUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.collection.Pair;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.data.HoodieJavaRDD;
+import org.apache.hudi.exception.HoodieNotSupportedException;
+import org.apache.hudi.index.HoodieSparkIndexClient;
+import org.apache.hudi.metrics.DistributedRegistry;
+import org.apache.hudi.metrics.MetricsReporterType;
+import org.apache.hudi.storage.StorageConfiguration;
+import org.apache.hudi.table.HoodieSparkTable;
+import org.apache.hudi.table.HoodieTable;
+
+import org.apache.avro.Schema;
+import org.apache.spark.api.java.JavaRDD;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hudi.common.model.HoodieFailedWritesCleaningPolicy.EAGER;
+
+public class SparkHoodieBackedTableMetadataWriterTableVersionSix extends HoodieBackedTableMetadataWriterTableVersionSix<JavaRDD<HoodieRecord>> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(SparkHoodieBackedTableMetadataWriter.class);
+
+  public static HoodieTableMetadataWriter create(StorageConfiguration<?> conf,
+                                                 HoodieWriteConfig writeConfig,
+                                                 HoodieEngineContext context,
+                                                 Option<String> inflightInstantTimestamp) {
+    return new SparkHoodieBackedTableMetadataWriterTableVersionSix(
+        conf, writeConfig, EAGER, context, inflightInstantTimestamp);
+  }
+
+  public static HoodieTableMetadataWriter create(StorageConfiguration<?> conf,
+                                                 HoodieWriteConfig writeConfig,
+                                                 HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy,
+                                                 HoodieEngineContext context,
+                                                 Option<String> inflightInstantTimestamp) {
+    return new SparkHoodieBackedTableMetadataWriterTableVersionSix(
+        conf, writeConfig, failedWritesCleaningPolicy, context, inflightInstantTimestamp);
+  }
+
+  public static HoodieTableMetadataWriter create(StorageConfiguration<?> conf, HoodieWriteConfig writeConfig,
+                                                 HoodieEngineContext context) {
+    return create(conf, writeConfig, context, Option.empty());
+  }
+
+  SparkHoodieBackedTableMetadataWriterTableVersionSix(StorageConfiguration<?> hadoopConf,
+                                                      HoodieWriteConfig writeConfig,
+                                                      HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy,
+                                                      HoodieEngineContext engineContext,
+                                                      Option<String> inflightInstantTimestamp) {
+    super(hadoopConf, writeConfig, failedWritesCleaningPolicy, engineContext, inflightInstantTimestamp);
+  }
+
+  @Override
+  protected void initRegistry() {
+    if (metadataWriteConfig.isMetricsOn()) {
+      Registry registry;
+      if (metadataWriteConfig.isExecutorMetricsEnabled() && metadataWriteConfig.getMetricsReporterType() != MetricsReporterType.INMEMORY) {
+        registry = Registry.getRegistry("HoodieMetadata", DistributedRegistry.class.getName());
+        HoodieSparkEngineContext sparkEngineContext = (HoodieSparkEngineContext) engineContext;
+        ((DistributedRegistry) registry).register(sparkEngineContext.getJavaSparkContext());
+      } else {
+        registry = Registry.getRegistry("HoodieMetadata");
+      }
+      this.metrics = Option.of(new HoodieMetadataMetrics(metadataWriteConfig.getMetricsConfig(), dataMetaClient.getStorage()));
+    } else {
+      this.metrics = Option.empty();
+    }
+  }
+
+  @Override
+  protected void commit(String instantTime, Map<String, HoodieData<HoodieRecord>> partitionRecordsMap) {
+    commitInternal(instantTime, partitionRecordsMap, false, Option.empty());
+  }
+
+  @Override
+  protected JavaRDD<HoodieRecord> convertHoodieDataToEngineSpecificData(HoodieData<HoodieRecord> records) {
+    return HoodieJavaRDD.getJavaRDD(records);
+  }
+
+  @Override
+  protected void bulkCommit(
+      String instantTime, String partitionName, HoodieData<HoodieRecord> records,
+      int fileGroupCount) {
+    SparkHoodieMetadataBulkInsertPartitioner partitioner = new SparkHoodieMetadataBulkInsertPartitioner(fileGroupCount);
+    commitInternal(instantTime, Collections.singletonMap(partitionName, records), true, Option.of(partitioner));
+  }
+
+  @Override
+  public void deletePartitions(String instantTime, List<MetadataPartitionType> partitions) {
Review Comment:
oh, it's a complete copy-paste of SparkHoodieBackedTableMetadataWriter.
##########
hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/metadata/FlinkHoodieBackedTableMetadataWriter.java:
##########
@@ -133,7 +133,7 @@ protected void commitInternal(String instantTime, Map<String, HoodieData<HoodieR
metadataMetaClient = HoodieTableMetaClient.reload(metadataMetaClient);
}
- compactIfNecessary(writeClient);
+ compactIfNecessary(writeClient, "");
Review Comment:
can we make the 2nd arg an Option<String>?
for table version 6, we can validate that this arg is present
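Something like this sketch (hedged: generics and the exact fallback are per the existing method, and putting the check in the version-six subclass is just one option):
```java
// Sketch only: base class takes an Option and falls back to the timeline
// when it is empty.
protected void compactIfNecessary(BaseHoodieWriteClient<?, I, ?, ?> writeClient,
                                  Option<String> latestDeltacommitTime) {
  // ... existing compaction logic, deriving the deltacommit time from the
  // metadata timeline when the Option is empty ...
}

// Sketch only: in the table-version-six writer, require the caller to pass it.
@Override
protected void compactIfNecessary(BaseHoodieWriteClient<?, I, ?, ?> writeClient,
                                  Option<String> latestDeltacommitTime) {
  ValidationUtils.checkArgument(latestDeltacommitTime.isPresent(),
      "latestDeltacommitTime must be present for table version 6");
  super.compactIfNecessary(writeClient, latestDeltacommitTime);
}
```
The Flink call site above would then pass an `Option` instead of `""`.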
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -1257,6 +1278,10 @@ public void update(HoodieRestoreMetadata restoreMetadata, String instantTime) {
}
}
+ String createRestoreInstantTime() {
Review Comment:
protected
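i.e., a sketch:
```java
protected String createRestoreInstantTime() {
  // ... body unchanged ...
}
```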
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -1552,7 +1577,7 @@ static HoodieActiveTimeline runPendingTableServicesOperationsAndRefreshTimeline(
    * 2. In multi-writer scenario, a parallel operation with a greater instantTime may have completed creating a
    * deltacommit.
    */
-  protected void compactIfNecessary(BaseHoodieWriteClient writeClient) {
+  void compactIfNecessary(BaseHoodieWriteClient<?,I,?,?> writeClient, String latestDeltacommitTime) {
Review Comment:
why was protected removed?
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestColumnStatsIndexTableVersionSix.scala:
##########
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.functional
+
+import org.apache.hudi.common.table.HoodieTableConfig
+import org.apache.hudi.config.HoodieWriteConfig
+
+class TestColumnStatsIndexTableVersionSix extends TestColumnStatsIndex {
+
+ override val overrideOpts: Map[String, String] = Map(
+ HoodieTableConfig.VERSION.key() -> "6",
Review Comment:
we should avoid copying the entire test class.
##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/functional/TestRecordLevelIndexTableVersionSix.scala:
##########
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.functional
+
+import org.apache.hudi.common.table.HoodieTableConfig
+import org.apache.hudi.config.HoodieWriteConfig
+
+class TestRecordLevelIndexTableVersionSix extends TestRecordLevelIndex {
+ override def commonOpts: Map[String, String] = super.commonOpts ++ Map(
Review Comment:
why can't we do something similar for the col stats test?
##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -155,8 +155,9 @@ public abstract class HoodieBackedTableMetadataWriter<I> implements HoodieTableM
protected StorageConfiguration<?> storageConf;
protected final transient HoodieEngineContext engineContext;
protected final List<MetadataPartitionType> enabledPartitionTypes;
+
// Is the MDT bootstrapped and ready to be read from
- private boolean initialized = false;
+ boolean initialized = false;
Review Comment:
why not protected or private?
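e.g., a sketch (which one depends on whether the version-six subclass needs to write it):
```java
// Sketch: protected if the subclass reads and writes it directly...
protected boolean initialized = false;

// ...or keep it private and expose only a read path:
// private boolean initialized = false;
// protected boolean isInitialized() { return initialized; }
```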
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metadata/SparkHoodieBackedTableMetadataWriterTableVersionSix.java:
##########
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.metadata;
+
+import org.apache.hudi.client.BaseHoodieWriteClient;
+import org.apache.hudi.client.SparkRDDWriteClient;
+import org.apache.hudi.client.common.HoodieSparkEngineContext;
+import org.apache.hudi.common.data.HoodieData;
+import org.apache.hudi.common.engine.EngineType;
+import org.apache.hudi.common.engine.HoodieEngineContext;
+import org.apache.hudi.common.metrics.Registry;
+import org.apache.hudi.common.model.HoodieFailedWritesCleaningPolicy;
+import org.apache.hudi.common.model.HoodieIndexDefinition;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieTableType;
+import org.apache.hudi.common.model.WriteOperationType;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.util.CommitUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.collection.Pair;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.data.HoodieJavaRDD;
+import org.apache.hudi.exception.HoodieNotSupportedException;
+import org.apache.hudi.index.HoodieSparkIndexClient;
+import org.apache.hudi.metrics.DistributedRegistry;
+import org.apache.hudi.metrics.MetricsReporterType;
+import org.apache.hudi.storage.StorageConfiguration;
+import org.apache.hudi.table.HoodieSparkTable;
+import org.apache.hudi.table.HoodieTable;
+
+import org.apache.avro.Schema;
+import org.apache.spark.api.java.JavaRDD;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hudi.common.model.HoodieFailedWritesCleaningPolicy.EAGER;
+
+public class SparkHoodieBackedTableMetadataWriterTableVersionSix extends HoodieBackedTableMetadataWriterTableVersionSix<JavaRDD<HoodieRecord>> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(SparkHoodieBackedTableMetadataWriter.class);
+
+  public static HoodieTableMetadataWriter create(StorageConfiguration<?> conf,
+                                                 HoodieWriteConfig writeConfig,
+                                                 HoodieEngineContext context,
+                                                 Option<String> inflightInstantTimestamp) {
+    return new SparkHoodieBackedTableMetadataWriterTableVersionSix(
+        conf, writeConfig, EAGER, context, inflightInstantTimestamp);
+  }
+
+  public static HoodieTableMetadataWriter create(StorageConfiguration<?> conf,
+                                                 HoodieWriteConfig writeConfig,
+                                                 HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy,
+                                                 HoodieEngineContext context,
+                                                 Option<String> inflightInstantTimestamp) {
+    return new SparkHoodieBackedTableMetadataWriterTableVersionSix(
+        conf, writeConfig, failedWritesCleaningPolicy, context, inflightInstantTimestamp);
+  }
+
+  public static HoodieTableMetadataWriter create(StorageConfiguration<?> conf, HoodieWriteConfig writeConfig,
+                                                 HoodieEngineContext context) {
+    return create(conf, writeConfig, context, Option.empty());
+  }
+
+  SparkHoodieBackedTableMetadataWriterTableVersionSix(StorageConfiguration<?> hadoopConf,
+                                                      HoodieWriteConfig writeConfig,
+                                                      HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy,
+                                                      HoodieEngineContext engineContext,
+                                                      Option<String> inflightInstantTimestamp) {
+    super(hadoopConf, writeConfig, failedWritesCleaningPolicy, engineContext, inflightInstantTimestamp);
+  }
+
+ @Override
+ protected void initRegistry() {
+ if (metadataWriteConfig.isMetricsOn()) {
Review Comment:
why override this? is this different between v6 and v8?
##########
hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieTestUtils.java:
##########
@@ -233,6 +233,10 @@ public static HoodieTableMetaClient.TableBuilder getMetaClientBuilder(HoodieTabl
builder.setKeyGeneratorType(properties.getProperty(HoodieTableConfig.KEY_GENERATOR_TYPE.key()));
}
+ if (properties.containsKey("hoodie.write.table.version")) {
Review Comment:
let's try to use the variables already defined for this. also at L237
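e.g., a sketch, assuming `HoodieWriteConfig.WRITE_TABLE_VERSION` is the ConfigProperty already defined for this key:
```java
// Sketch: reuse the defined constant instead of the raw string (same at L237).
if (properties.containsKey(HoodieWriteConfig.WRITE_TABLE_VERSION.key())) {
  // ... existing handling, unchanged ...
}
```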
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]