yanghua commented on a change in pull request #1554:
URL: https://github.com/apache/incubator-hudi/pull/1554#discussion_r420784920



##########
File path: hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java
##########
@@ -64,19 +69,35 @@ public String deduplicate(
      @CliOption(key = {"repairedOutputPath"}, help = "Location to place the repaired files",
          mandatory = true) final String repairedOutputPath,
      @CliOption(key = {"sparkProperties"}, help = "Spark Properties File Path",
-          mandatory = true) final String sparkPropertiesPath)
+          unspecifiedDefaultValue = "") String sparkPropertiesPath,
+      @CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master ") String master,

Review comment:
       Trailing space in the help text: `"Spark Master "` -> `"Spark Master"`?
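       As a concrete suggestion (the same annotation, with only the help text changed):

```java
@CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master") String master,
```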

##########
File path: hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java
##########
@@ -64,19 +69,35 @@ public String deduplicate(
      @CliOption(key = {"repairedOutputPath"}, help = "Location to place the repaired files",
          mandatory = true) final String repairedOutputPath,
      @CliOption(key = {"sparkProperties"}, help = "Spark Properties File Path",
-          mandatory = true) final String sparkPropertiesPath)
+          unspecifiedDefaultValue = "") String sparkPropertiesPath,
+      @CliOption(key = "sparkMaster", unspecifiedDefaultValue = "", help = "Spark Master ") String master,
+      @CliOption(key = "sparkMemory", unspecifiedDefaultValue = "4G",
+          help = "Spark executor memory") final String sparkMemory,
+      @CliOption(key = {"dryrun"},
+          help = "Should we actually remove duplicates or just run and store result to repairedOutputPath",
+          unspecifiedDefaultValue = "true") final boolean dryRun)
       throws Exception {
+    if (StringUtils.isNullOrEmpty(sparkPropertiesPath)) {
+      sparkPropertiesPath =
+          Utils.getDefaultPropertiesFile(JavaConverters.mapAsScalaMapConverter(System.getenv()).asScala());
+    }
+
     SparkLauncher sparkLauncher = SparkUtil.initLauncher(sparkPropertiesPath);
-    sparkLauncher.addAppArgs(SparkMain.SparkCommand.DEDUPLICATE.toString(), duplicatedPartitionPath, repairedOutputPath,
-        HoodieCLI.getTableMetaClient().getBasePath());
+    sparkLauncher.addAppArgs(SparkMain.SparkCommand.DEDUPLICATE.toString(), master, sparkMemory,

Review comment:
       Same suggestion here: we should try to define a data structure for these launch arguments instead of a long positional list. We can refactor it later.
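       To make that concrete, a minimal sketch of such a data structure (the class, field, and method names below are illustrative, not part of this PR) could look like:

```java
// Hypothetical holder for the deduplicate launch arguments, so callers pass one
// object around instead of a long positional list. The argument order in
// toAppArgs() is illustrative and would need to match what SparkMain expects.
public class DeduplicateArgs {
  private final String master;
  private final String sparkMemory;
  private final String duplicatedPartitionPath;
  private final String repairedOutputPath;
  private final String basePath;

  public DeduplicateArgs(String master, String sparkMemory, String duplicatedPartitionPath,
      String repairedOutputPath, String basePath) {
    this.master = master;
    this.sparkMemory = sparkMemory;
    this.duplicatedPartitionPath = duplicatedPartitionPath;
    this.repairedOutputPath = repairedOutputPath;
    this.basePath = basePath;
  }

  // Flatten to the positional form that SparkLauncher#addAppArgs accepts.
  public String[] toAppArgs() {
    return new String[] {SparkMain.SparkCommand.DEDUPLICATE.toString(), master, sparkMemory,
        duplicatedPartitionPath, repairedOutputPath, basePath};
  }
}
```

       The command could then call `sparkLauncher.addAppArgs(args.toAppArgs())`, and there would be a single place documenting the expected positions.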

##########
File path: hudi-cli/src/main/java/org/apache/hudi/cli/commands/SparkMain.java
##########
@@ -73,8 +73,8 @@ public static void main(String[] args) throws Exception {
         returnCode = rollback(jsc, args[1], args[2]);
         break;
       case DEDUPLICATE:
-        assert (args.length == 4);
-        returnCode = deduplicatePartitionPath(jsc, args[1], args[2], args[3]);
+        assert (args.length == 7);
+        returnCode = deduplicatePartitionPath(jsc, args[3], args[4], args[5], args[6]);

Review comment:
       IMHO, we also need to refactor the argument parsing here, but not in this PR.
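       For example (a sketch only, not for this PR): name the positional arguments instead of indexing `args[]` inline, and validate the count explicitly, since a bare `assert` is a no-op unless the JVM runs with `-ea`:

```java
// Sketch of a more defensive arg parse for this case. The local variable
// names are guesses at what positions 3..6 mean and would need checking
// against the actual contract of SparkCommand.DEDUPLICATE.
case DEDUPLICATE: {
  if (args.length != 7) {
    throw new IllegalArgumentException(
        "DEDUPLICATE expects 7 arguments but got " + args.length);
  }
  String duplicatedPartitionPath = args[3];
  String repairedOutputPath = args[4];
  String basePath = args[5];
  String dryRun = args[6];
  returnCode = deduplicatePartitionPath(jsc, duplicatedPartitionPath, repairedOutputPath, basePath, dryRun);
  break;
}
```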

##########
File path: hudi-cli/src/test/java/org/apache/hudi/cli/integ/ITTestRepairsCommand.java
##########
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.cli.integ;
+
+import org.apache.avro.Schema;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hudi.avro.HoodieAvroUtils;
+import org.apache.hudi.cli.AbstractShellIntegrationTest;
+import org.apache.hudi.cli.HoodieCLI;
+import org.apache.hudi.cli.commands.RepairsCommand;
+import org.apache.hudi.cli.commands.TableCommand;
+import org.apache.hudi.common.HoodieClientTestUtils;
+import org.apache.hudi.common.HoodieTestDataGenerator;
+import org.apache.hudi.common.fs.FSUtils;
+import org.apache.hudi.common.model.HoodieBaseFile;
+import org.apache.hudi.common.model.HoodieLogFile;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieTableType;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
+import org.apache.hudi.common.table.view.HoodieTableFileSystemView;
+import org.apache.hudi.common.util.SchemaTestUtil;
+import org.apache.spark.sql.Dataset;
+
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.springframework.shell.core.CommandResult;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import static org.apache.spark.sql.functions.lit;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Integration test class for {@link RepairsCommand#deduplicate}.
+ * <p/>
+ * A command that uses SparkLauncher needs to load the jars under lib, which are generated during mvn package,
+ * so this is an integration test rather than a unit test.
+ */
+public class ITTestRepairsCommand extends AbstractShellIntegrationTest {
+
+  private String duplicatedPartitionPath;
+  private String repairedOutputPath;
+
+  @BeforeEach
+  public void init() throws IOException, URISyntaxException {
+    String tablePath = basePath + File.separator + "test_table";
+    duplicatedPartitionPath = tablePath + File.separator + HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH;
+    repairedOutputPath = basePath + File.separator + "tmp";
+
+    HoodieCLI.conf = jsc.hadoopConfiguration();
+
+    // Create table and connect
+    new TableCommand().createTable(
+        tablePath, "test_table", HoodieTableType.COPY_ON_WRITE.name(),
+        "", TimelineLayoutVersion.VERSION_1, "org.apache.hudi.common.model.HoodieAvroPayload");
+
+    // generate 200 records
+    Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getSimpleSchema());
+
+    String fileName1 = "1_0_20160401010101.parquet";
+    String fileName2 = "2_0_20160401010101.parquet";
+
+    List<HoodieRecord> hoodieRecords1 = SchemaTestUtil.generateHoodieTestRecords(0, 100, schema);
+    HoodieClientTestUtils.writeParquetFile(tablePath, HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH,
+        fileName1, hoodieRecords1, schema, null, false);
+    List<HoodieRecord> hoodieRecords2 = SchemaTestUtil.generateHoodieTestRecords(100, 100, schema);
+    HoodieClientTestUtils.writeParquetFile(tablePath, HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH,
+        fileName2, hoodieRecords2, schema, null, false);
+
+    // generate commit file
+    String fileId1 = UUID.randomUUID().toString();
+    String testWriteToken = "1-0-1";
+    String commitTime = FSUtils.getCommitTime(fileName1);
+    Files.createFile(Paths.get(duplicatedPartitionPath + "/"
+        + FSUtils.makeLogFileName(fileId1, HoodieLogFile.DELTA_EXTENSION, commitTime, 1, testWriteToken)));
+    Files.createFile(Paths.get(tablePath + "/.hoodie/" + commitTime + ".commit"));
+
+    // read records and get 10 to generate duplicates
+    Dataset df = sqlContext.read().parquet(duplicatedPartitionPath);
+
+    String fileName3 = "3_0_20160401010202.parquet";
+    commitTime = FSUtils.getCommitTime(fileName3);
+    df.limit(10).withColumn("_hoodie_commit_time", lit(commitTime))
+        .write().parquet(duplicatedPartitionPath + File.separator + fileName3);
+    Files.createFile(Paths.get(tablePath + "/.hoodie/" + commitTime + ".commit"));
+
+    metaClient = HoodieTableMetaClient.reload(HoodieCLI.getTableMetaClient());
+  }
+
+  /**
+   * Test case for dry run deduplicate.
+   */
+  @Test
+  public void testDeduplicate() throws IOException {
+    // get fs and check number of latest files
+    HoodieTableFileSystemView fsView = new HoodieTableFileSystemView(metaClient,
+        metaClient.getActiveTimeline().getCommitTimeline().filterCompletedInstants(),
+        fs.listStatus(new Path(duplicatedPartitionPath)));
+    List<String> filteredStatuses = fsView.getLatestBaseFiles().map(HoodieBaseFile::getPath).collect(Collectors.toList());
+    assertEquals(3, filteredStatuses.size(), "There should be 3 files.");
+
+    // Before deduplicate, all files contain 210 records
+    String[] files = filteredStatuses.toArray(new String[0]);
+    Dataset df = sqlContext.read().parquet(files);
+    assertEquals(210, df.count());
+
+    String partitionPath = HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH;
+    String cmdStr = "repair deduplicate --duplicatedPartitionPath " + partitionPath
+        + " --repairedOutputPath " + repairedOutputPath + " --sparkMaster local";
+    CommandResult cr = getShell().executeCommand(cmdStr);
+    assertTrue(cr.isSuccess());
+    assertEquals(RepairsCommand.DEDUPLICATE_RETURN_PREFIX + repairedOutputPath, cr.getResult().toString());
+
+    // After deduplicate, there are 200 records
+    FileStatus[] fileStatus = fs.listStatus(new Path(repairedOutputPath));
+    files = Arrays.stream(fileStatus).map(status -> status.getPath().toString()).toArray(String[]::new);
+    Dataset result = sqlContext.read().parquet(files);
+    assertEquals(200, result.count());
+  }
+
+  /**
+   * Test case for real run deduplicate.
+   */
+  @Test
+  public void testDeduplicateWithReal() throws IOException {
+    // get fs and check number of latest files
+    HoodieTableFileSystemView fsView = new HoodieTableFileSystemView(metaClient,
+        metaClient.getActiveTimeline().getCommitTimeline().filterCompletedInstants(),
+        fs.listStatus(new Path(duplicatedPartitionPath)));
+    List<String> filteredStatuses = fsView.getLatestBaseFiles().map(HoodieBaseFile::getPath).collect(Collectors.toList());
+    assertEquals(3, filteredStatuses.size(), "There should be 3 files.");
+
+    // Before deduplicate, all files contain 210 records
+    String[] files = filteredStatuses.toArray(new String[0]);
+    Dataset df = sqlContext.read().parquet(files);
+    assertEquals(210, df.count());
+
+    String partitionPath = HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH;
+    String cmdStr = "repair deduplicate --duplicatedPartitionPath " + partitionPath

Review comment:
       ditto

##########
File path: hudi-cli/src/test/java/org/apache/hudi/cli/integ/ITTestRepairsCommand.java
##########
@@ -0,0 +1,179 @@
+    String partitionPath = HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH;
+    String cmdStr = "repair deduplicate --duplicatedPartitionPath " + partitionPath

Review comment:
       Can we use `String.format(xxx)` here?
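       i.e. something along these lines (building the same command string as the concatenation above):

```java
// The suggestion sketched with String.format for readability; produces the
// same command string as the current concatenation.
String cmdStr = String.format(
    "repair deduplicate --duplicatedPartitionPath %s --repairedOutputPath %s --sparkMaster %s",
    partitionPath, repairedOutputPath, "local");
```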




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

