xushiyan commented on a change in pull request #1360: [HUDI-344][RFC-09] Hudi Dataset Snapshot Exporter
URL: https://github.com/apache/incubator-hudi/pull/1360#discussion_r386783660
 
 

 ##########
 File path: hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java
 ##########
 @@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.utilities;
+
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.SerializableConfiguration;
+import org.apache.hudi.common.model.HoodiePartitionMetadata;
+import org.apache.hudi.common.table.HoodieTableConfig;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.HoodieTimeline;
+import org.apache.hudi.common.table.TableFileSystemView;
+import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.view.HoodieTableFileSystemView;
+import org.apache.hudi.common.util.FSUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.sql.Column;
+import org.apache.spark.sql.SaveMode;
+import org.apache.spark.sql.SparkSession;
+
+import scala.Tuple2;
+import scala.collection.JavaConversions;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Export the latest records of a Hudi dataset to a set of external files (e.g., plain parquet files).
+ */
+
+public class HoodieSnapshotExporter {
+  private static final Logger LOG = LogManager.getLogger(HoodieSnapshotExporter.class);
+
+  public static class Config implements Serializable {
+    @Parameter(names = {"--source-base-path", "-sbp"}, description = "Base path for the source Hudi dataset to be snapshotted", required = true)
+    String basePath = null;
+
+    @Parameter(names = {"--target-base-path", "-tbp"}, description = "Base path for the target output files (snapshots)", required = true)
+    String outputPath = null;
+
+    @Parameter(names = {"--snapshot-prefix", "-sp"}, description = "Snapshot prefix or directory under the target base path in order to segregate different snapshots")
+    String snapshotPrefix;
+
+    @Parameter(names = {"--output-format", "-of"}, description = "e.g. Hudi or Parquet", required = true)
+    String outputFormat;
+
+    @Parameter(names = {"--output-partition-field", "-opf"}, description = "A field to be used by Spark repartitioning")
+    String outputPartitionField;
+  }
+
+  public void export(SparkSession spark, Config cfg) throws IOException {
+    String sourceBasePath = cfg.basePath;
+    String targetBasePath = cfg.outputPath;
+    String snapshotPrefix = cfg.snapshotPrefix;
+    String outputFormat = cfg.outputFormat;
+    String outputPartitionField = cfg.outputPartitionField;
+    JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
+    FileSystem fs = FSUtils.getFs(sourceBasePath, jsc.hadoopConfiguration());
+
+    final SerializableConfiguration serConf = new SerializableConfiguration(jsc.hadoopConfiguration());
+    final HoodieTableMetaClient tableMetadata = new HoodieTableMetaClient(fs.getConf(), sourceBasePath);
+    final TableFileSystemView.BaseFileOnlyView fsView = new HoodieTableFileSystemView(tableMetadata,
+        tableMetadata.getActiveTimeline().getCommitsTimeline().filterCompletedInstants());
+    // Get the latest commit
+    Option<HoodieInstant> latestCommit =
+        tableMetadata.getActiveTimeline().getCommitsTimeline().filterCompletedInstants().lastInstant();
+    if (!latestCommit.isPresent()) {
+      LOG.warn("No commits present. Nothing to snapshot");
+      return;
+    }
+    final String latestCommitTimestamp = latestCommit.get().getTimestamp();
+    LOG.info(String.format("Starting to snapshot latest version files which are also no later than %s.",
+        latestCommitTimestamp));
+
+    List<String> partitions = FSUtils.getAllPartitionPaths(fs, sourceBasePath, false);
+    if (partitions.size() > 0) {
+      List<String> dataFiles = new ArrayList<>();
+
+      if (!StringUtils.isNullOrEmpty(snapshotPrefix)) {
+        for (String partition : partitions) {
+          if (partition.contains(snapshotPrefix)) {
+            dataFiles.addAll(fsView.getLatestBaseFilesBeforeOrOn(partition, latestCommitTimestamp).map(f -> f.getPath()).collect(Collectors.toList()));
+          }
+        }
+      } else {
+        for (String partition : partitions) {
+          dataFiles.addAll(fsView.getLatestBaseFilesBeforeOrOn(partition, latestCommitTimestamp).map(f -> f.getPath()).collect(Collectors.toList()));
+        }
+      }
+
+      if (!outputFormat.equalsIgnoreCase("hudi")) {
 
 Review comment:
   I get your concern. The main point I'd like to emphasize here is that we want to be explicit with users about what this utility supports.
   The Spark `format()` API is powerful and reduces code; on the downside, it makes the code harder to understand: we claim to support parquet and hudi, but internally we rely on the Spark API. Whatever is claimed in the configuration docs should map directly to the code for ease of understanding; that's what I intended the `switch` for. With fall-through switch cases, you can still use `format()` and achieve that explicitness.
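   For illustration, a fall-through `switch` could look like this sketch (the format list and the helper shape are hypothetical, not this PR's code):
   ```java
   import org.apache.spark.sql.Dataset;
   import org.apache.spark.sql.Row;
   import org.apache.spark.sql.SaveMode;

   class FormatDispatchSketch {
     // Validate the claimed formats explicitly, then delegate to Spark's generic writer.
     static void write(Dataset<Row> df, String outputFormat, String targetPath) {
       switch (outputFormat.toLowerCase()) {
         case "json":
         case "orc":
         case "parquet":
           // fall through: every explicitly claimed format shares the same Spark write path
           df.write().format(outputFormat.toLowerCase()).mode(SaveMode.Overwrite).save(targetPath);
           break;
         default:
           // anything else is rejected up front, keeping the docs and the code in sync
           throw new IllegalArgumentException("Unsupported output format: " + outputFormat);
       }
     }
   }
   ```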
   That said, I don't mind keeping your current approach and changing the Exporter's docs to say it supports all Spark write formats. I'm a little worried about decoupling from Spark in the future, but that may not be a big issue for now.
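   For contrast, the pass-through approach presumably reduces to a single call (again a sketch, not this PR's code), trading explicitness for flexibility:
   ```java
   // Accept any Spark-supported format string as-is.
   df.write().format(outputFormat).mode(SaveMode.Overwrite).save(targetPath);
   ```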

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services
