This is an automated email from the ASF dual-hosted git repository.

xushiyan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 6534a7f2c35 [HUDI-5923] Fix deletion of metadata table via cli (#8167)
6534a7f2c35 is described below

commit 6534a7f2c3524faf6cbf00c0d98efe64c82d8b94
Author: Sivabalan Narayanan <[email protected]>
AuthorDate: Sat May 20 13:56:02 2023 -0400

    [HUDI-5923] Fix deletion of metadata table via cli (#8167)
    
    
    ---------
    
    Co-authored-by: Raymond Xu <[email protected]>
---
 .../apache/hudi/cli/commands/MetadataCommand.java  |  13 ++-
 .../hudi/cli/commands/TestMetadataCommand.java     | 106 +++++++++++++++++++++
 2 files changed, 117 insertions(+), 2 deletions(-)

diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java
index 1c4e62b8184..a9aefa2ab47 100644
--- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java
+++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/MetadataCommand.java
@@ -25,6 +25,8 @@ import org.apache.hudi.cli.utils.SparkUtil;
 import org.apache.hudi.client.common.HoodieSparkEngineContext;
 import org.apache.hudi.common.config.HoodieMetadataConfig;
 import org.apache.hudi.common.engine.HoodieLocalEngineContext;
+import org.apache.hudi.common.table.HoodieTableConfig;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.util.HoodieTimer;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.StringUtils;
@@ -55,6 +57,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import static org.apache.hudi.common.table.HoodieTableConfig.TABLE_METADATA_PARTITIONS;
+import static org.apache.hudi.common.table.HoodieTableConfig.TABLE_METADATA_PARTITIONS_INFLIGHT;
+
 /**
  * CLI commands to operate on the Metadata Table.
  * <p>
@@ -132,7 +137,7 @@ public class MetadataCommand {
 
   @ShellMethod(key = "metadata delete", value = "Remove the Metadata Table")
   public String delete() throws Exception {
-    HoodieCLI.getTableMetaClient();
+    HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient();
     Path metadataPath = new Path(getMetadataTableBasePath(HoodieCLI.basePath));
     try {
       FileStatus[] statuses = HoodieCLI.fs.listStatus(metadataPath);
@@ -143,6 +148,10 @@ public class MetadataCommand {
       // Metadata directory does not exist
     }
 
+    LOG.info("Clear hoodie.table.metadata.partitions in hoodie.properties");
+    HoodieTableConfig.delete(metaClient.getFs(), new Path(metaClient.getMetaPath()), new HashSet<>(Arrays
+        .asList(TABLE_METADATA_PARTITIONS.key(), TABLE_METADATA_PARTITIONS_INFLIGHT.key())));
+
     return String.format("Removed Metadata Table from %s", metadataPath);
   }
 
@@ -372,4 +381,4 @@ public class MetadataCommand {
       jsc = SparkUtil.initJavaSparkContext(SparkUtil.getDefaultConf("HoodieCLI", userDefinedMaster));
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestMetadataCommand.java b/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestMetadataCommand.java
new file mode 100644
index 00000000000..3214bb2cfcc
--- /dev/null
+++ b/hudi-cli/src/test/java/org/apache/hudi/cli/commands/TestMetadataCommand.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.cli.commands;
+
+import org.apache.hudi.cli.HoodieCLI;
+import org.apache.hudi.cli.functional.CLIFunctionalTestHarness;
+import org.apache.hudi.cli.testutils.ShellEvaluationResultUtil;
+import org.apache.hudi.client.SparkRDDWriteClient;
+import org.apache.hudi.client.WriteStatus;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieTableType;
+import org.apache.hudi.common.table.HoodieTableConfig;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
+import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.keygen.SimpleKeyGenerator;
+import org.apache.hudi.testutils.Assertions;
+
+import org.apache.spark.api.java.JavaRDD;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.shell.Shell;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hudi.common.testutils.HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+@Tag("functional")
+@SpringBootTest(properties = {"spring.shell.interactive.enabled=false", "spring.shell.command.script.enabled=false"})
+public class TestMetadataCommand extends CLIFunctionalTestHarness {
+
+  @Autowired
+  private Shell shell;
+  private String tableName;
+  private String tablePath;
+
+  @BeforeEach
+  public void init() throws IOException {
+    tableName = tableName();
+    tablePath = tablePath(tableName);
+    HoodieCLI.conf = hadoopConf();
+  }
+
+  @Test
+  public void testMetadataDelete() throws Exception {
+    HoodieTableMetaClient.withPropertyBuilder()
+        .setTableType(HoodieTableType.COPY_ON_WRITE.name())
+        .setTableName(tableName())
+        .setArchiveLogFolder(HoodieTableConfig.ARCHIVELOG_FOLDER.defaultValue())
+        .setPayloadClassName("org.apache.hudi.common.model.HoodieAvroPayload")
+        .setTimelineLayoutVersion(TimelineLayoutVersion.VERSION_1)
+        .setPartitionFields("partition_path")
+        .setRecordKeyFields("_row_key")
+        .setKeyGeneratorClassProp(SimpleKeyGenerator.class.getCanonicalName())
+        .initTable(HoodieCLI.conf, tablePath);
+
+    HoodieTestDataGenerator dataGen = new HoodieTestDataGenerator();
+    HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(tablePath).withSchema(TRIP_EXAMPLE_SCHEMA).build();
+
+    try (SparkRDDWriteClient client = new SparkRDDWriteClient(context(), config)) {
+      String newCommitTime = "001";
+      int numRecords = 10;
+      client.startCommitWithTime(newCommitTime);
+
+      List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, numRecords);
+      JavaRDD<HoodieRecord> writeRecords = context().getJavaSparkContext().parallelize(records, 1);
+      List<WriteStatus> result = client.upsert(writeRecords, newCommitTime).collect();
+      Assertions.assertNoWriteErrors(result);
+    }
+
+    // verify that metadata partitions are filled in as part of table config.
+    HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(hadoopConf()).setBasePath(tablePath).build();
+    assertFalse(metaClient.getTableConfig().getMetadataPartitions().isEmpty());
+
+    new TableCommand().connect(tablePath, null, false, 0, 0, 0);
+    Object result = shell.evaluate(() -> "metadata delete");
+    assertTrue(ShellEvaluationResultUtil.isSuccess(result));
+
+    metaClient = HoodieTableMetaClient.reload(metaClient);
+    assertTrue(metaClient.getTableConfig().getMetadataPartitions().isEmpty());
+  }
+}

Reply via email to