xushiyan commented on a change in pull request #1360: [HUDI-344][RFC-09] Hudi Dataset Snapshot Exporter
URL: https://github.com/apache/incubator-hudi/pull/1360#discussion_r386093576
########## File path: hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java ##########
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.utilities;
+
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.SerializableConfiguration;
+import org.apache.hudi.common.model.HoodiePartitionMetadata;
+import org.apache.hudi.common.table.HoodieTableConfig;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.HoodieTimeline;
+import org.apache.hudi.common.table.TableFileSystemView;
+import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.view.HoodieTableFileSystemView;
+import org.apache.hudi.common.util.FSUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.sql.Column;
+import org.apache.spark.sql.SaveMode;
+import org.apache.spark.sql.SparkSession;
+
+import scala.Tuple2;
+import scala.collection.JavaConversions;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Export the latest records of Hudi dataset to a set of external files (e.g., plain parquet files).
+ */
+
+public class HoodieSnapshotExporter {
+  private static final Logger LOG = LogManager.getLogger(HoodieSnapshotExporter.class);
+
+  public static class Config implements Serializable {
+    @Parameter(names = {"--source-base-path", "-sbp"}, description = "Base path for the source Hudi dataset to be snapshotted", required = true)
+    String basePath = null;
+
+    @Parameter(names = {"--target-base-path", "-tbp"}, description = "Base path for the target output files (snapshots)", required = true)
+    String outputPath = null;
+
+    @Parameter(names = {"--snapshot-prefix", "-sp"}, description = "Snapshot prefix or directory under the target base path in order to segregate different snapshots")
+    String snapshotPrefix;
+
+    @Parameter(names = {"--output-format", "-of"}, description = "e.g. Hudi or Parquet", required = true)
+    String outputFormat;
+
+    @Parameter(names = {"--output-partition-field", "-opf"}, description = "A field to be used by Spark repartitioning")
+    String outputPartitionField;
+  }
+
+  public void export(SparkSession spark, Config cfg) throws IOException {
+    String sourceBasePath = cfg.basePath;
+    String targetBasePath = cfg.outputPath;
+    String snapshotPrefix = cfg.snapshotPrefix;
+    String outputFormat = cfg.outputFormat;
+    String outputPartitionField = cfg.outputPartitionField;
+    JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());
+    FileSystem fs = FSUtils.getFs(sourceBasePath, jsc.hadoopConfiguration());
+
+    final SerializableConfiguration serConf = new SerializableConfiguration(jsc.hadoopConfiguration());
+    final HoodieTableMetaClient tableMetadata = new HoodieTableMetaClient(fs.getConf(), sourceBasePath);
+    final TableFileSystemView.BaseFileOnlyView fsView = new HoodieTableFileSystemView(tableMetadata,
+        tableMetadata.getActiveTimeline().getCommitsTimeline().filterCompletedInstants());
+    // Get the latest commit
+    Option<HoodieInstant> latestCommit =
+        tableMetadata.getActiveTimeline().getCommitsTimeline().filterCompletedInstants().lastInstant();
+    if (!latestCommit.isPresent()) {
+      LOG.warn("No commits present. Nothing to snapshot");
+      return;
+    }
+    final String latestCommitTimestamp = latestCommit.get().getTimestamp();
+    LOG.info(String.format("Starting to snapshot latest version files which are also no-late-than %s.",
+        latestCommitTimestamp));
+
+    List<String> partitions = FSUtils.getAllPartitionPaths(fs, sourceBasePath, false);
+    if (partitions.size() > 0) {
+      List<String> dataFiles = new ArrayList<>();
+
+      if (!StringUtils.isNullOrEmpty(snapshotPrefix)) {
+        for (String partition : partitions) {
+          if (partition.contains(snapshotPrefix)) {
+            dataFiles.addAll(fsView.getLatestBaseFilesBeforeOrOn(partition, latestCommitTimestamp).map(f -> f.getPath()).collect(Collectors.toList()));
+          }
+        }
+      } else {
+        for (String partition : partitions) {
+          dataFiles.addAll(fsView.getLatestBaseFilesBeforeOrOn(partition, latestCommitTimestamp).map(f -> f.getPath()).collect(Collectors.toList()));
+        }
+      }
+
+      if (!outputFormat.equalsIgnoreCase("hudi")) {

Review comment:
   @OpenOpened I understand that `format()` can provide some flexibility there, but I would argue against it for two reasons:

   1. We are providing this as a user-input argument to establish a contract for the values passed to our API. We don't want to rely on the internal logic where Spark takes that value and does the conversion. Imagine a user passing an invalid value like "foo": the Spark API would then throw an error exposing internal implementation details that the user should not care about.
   2. We want to be explicit about what the API supports and does not. A `switch` makes this clear and the code more readable, and the `default:` case is the ideal place to throw an "Unsupported" exception to the user. (A minimal sketch follows below.)

   In addition, the `export()` method is somewhat lengthy. For better readability, I would suggest breaking it into multiple methods; handling the different conversion types could be a good separation. (See the second sketch below.)
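   To make the suggestion concrete, here is a minimal sketch of the `switch`-based dispatch, assuming hypothetical helper methods `exportAsHudi` and `exportAsParquet` that are not part of this PR:

   ```java
   // Sketch only: dispatch on the user-supplied --output-format value and fail fast
   // on anything unsupported, instead of forwarding raw input to Spark's format().
   // exportAsHudi()/exportAsParquet() are hypothetical helpers, not part of this PR.
   private void exportByFormat(String outputFormat) {
     switch (outputFormat.toLowerCase()) {
       case "hudi":
         exportAsHudi();
         break;
       case "parquet":
         exportAsParquet();
         break;
       default:
         // User-facing contract: name the bad input rather than leaking Spark internals.
         throw new IllegalArgumentException("Unsupported output format: " + outputFormat);
     }
   }
   ```

   This way an invalid value like "foo" produces a clear error at the API boundary rather than a Spark stack trace.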
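   And one possible shape for the decomposition of `export()`; the helper names and signatures here are assumptions for illustration only:

   ```java
   // Illustrative decomposition only; createMetaClient(), getLatestCommit(), and
   // collectLatestBaseFiles() are assumed helper names, not part of this PR.
   public void export(SparkSession spark, Config cfg) throws IOException {
     HoodieTableMetaClient tableMetadata = createMetaClient(spark, cfg.basePath);
     Option<HoodieInstant> latestCommit = getLatestCommit(tableMetadata);
     if (!latestCommit.isPresent()) {
       LOG.warn("No commits present. Nothing to snapshot");
       return;
     }
     List<String> dataFiles = collectLatestBaseFiles(tableMetadata, cfg, latestCommit.get().getTimestamp());
     exportByFormat(cfg.outputFormat); // the switch sketched above
   }
   ```

   Each helper then stays short enough to read and test on its own.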
