[ https://issues.apache.org/jira/browse/CRUNCH-679?focusedWorklogId=276164&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-276164 ]
ASF GitHub Bot logged work on CRUNCH-679: ----------------------------------------- Author: ASF GitHub Bot Created on: 12/Jul/19 21:48 Start Date: 12/Jul/19 21:48 Worklog Time Spent: 10m Work Description: mkwhitacre commented on pull request #20: CRUNCH-679: Improvements for usage of DistCp URL: https://github.com/apache/crunch/pull/20#discussion_r303159612 ########## File path: crunch-core/src/main/java/org/apache/crunch/util/CrunchRenameCopyListing.java ########## @@ -0,0 +1,272 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the + * Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +package org.apache.crunch.util; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.SequenceFile; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.tools.CopyListing; +import org.apache.hadoop.tools.CopyListingFileStatus; +import org.apache.hadoop.tools.DistCpOptions; +import org.apache.hadoop.tools.DistCpOptions.FileAttribute; +import org.apache.hadoop.tools.SimpleCopyListing; +import org.apache.hadoop.tools.util.DistCpUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Stack; + +/** + * A custom {@link CopyListing} implementation capable of dynamically renaming + * the target paths according to a {@link #DISTCP_PATH_RENAMES configured set of values}. + * <p> + * Once https://issues.apache.org/jira/browse/HADOOP-16147 is available, this + * class can be significantly simplified. + * </p> + */ +public class CrunchRenameCopyListing extends SimpleCopyListing { + /** + * Comma-separated list of original-file:renamed-file path rename pairs. + */ + public static final String DISTCP_PATH_RENAMES = "crunch.distcp.path.renames"; + + private static final Logger LOG = LoggerFactory.getLogger(CrunchRenameCopyListing.class); + private final Map<String, String> pathRenames; + + private long totalPaths = 0; + private long totalBytesToCopy = 0; + + /** + * Constructor, to initialize configuration. + * + * @param configuration The input configuration, with which the source/target FileSystems may be accessed. + * @param credentials - Credentials object on which the FS delegation tokens are cached. 
If null + * delegation token caching is skipped + */ + public CrunchRenameCopyListing(Configuration configuration, Credentials credentials) { + super(configuration, credentials); + + pathRenames = new HashMap<>(); + + String[] pathRenameConf = configuration.getStrings(DISTCP_PATH_RENAMES); + if (pathRenameConf == null) { + throw new IllegalArgumentException("Missing required configuration: " + DISTCP_PATH_RENAMES); + } + for (String pathRename : pathRenameConf) { + String[] pathRenameParts = pathRename.split(":"); + if (pathRenameParts.length != 2) { + throw new IllegalArgumentException("Invalid path rename format: " + pathRename); + } + if (pathRenames.put(pathRenameParts[0], pathRenameParts[1]) != null) { + throw new IllegalArgumentException("Invalid duplicate path rename: " + pathRenameParts[0]); + } + } + LOG.info("Loaded {} path rename entries", pathRenames.size()); + + // Clear out the rename configuration property, as it is no longer needed + configuration.unset(DISTCP_PATH_RENAMES); + } + + @Override + public void doBuildListing(SequenceFile.Writer fileListWriter, DistCpOptions options) throws IOException { + try { + for (Path path : options.getSourcePaths()) { + FileSystem sourceFS = path.getFileSystem(getConf()); + final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL); + final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR); + final boolean preserveRawXAttrs = options.shouldPreserveRawXattrs(); + path = makeQualified(path); + + FileStatus rootStatus = sourceFS.getFileStatus(path); + Path sourcePathRoot = computeSourceRootPath(rootStatus, options); + + FileStatus[] sourceFiles = sourceFS.listStatus(path); + boolean explore = (sourceFiles != null && sourceFiles.length > 0); + if (!explore || rootStatus.isDirectory()) { + CopyListingFileStatus rootCopyListingStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, rootStatus, preserveAcls, + preserveXAttrs, preserveRawXAttrs); + writeToFileListingRoot(fileListWriter, 
rootCopyListingStatus, sourcePathRoot, options); + } + if (explore) { + for (FileStatus sourceStatus : sourceFiles) { + if (LOG.isDebugEnabled()) { + LOG.debug("Recording source-path: " + sourceStatus.getPath() + " for copy."); + } + CopyListingFileStatus sourceCopyListingStatus = DistCpUtils.toCopyListingFileStatus(sourceFS, sourceStatus, + preserveAcls && sourceStatus.isDirectory(), preserveXAttrs && sourceStatus.isDirectory(), + preserveRawXAttrs && sourceStatus.isDirectory()); + writeToFileListing(fileListWriter, sourceCopyListingStatus, sourcePathRoot, options); + + if (isDirectoryAndNotEmpty(sourceFS, sourceStatus)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Traversing non-empty source dir: " + sourceStatus.getPath()); + } + traverseNonEmptyDirectory(fileListWriter, sourceStatus, sourcePathRoot, options); + } + } + } + } + fileListWriter.close(); + fileListWriter = null; + } finally { + if (fileListWriter != null) { + try { + fileListWriter.close(); + } catch(IOException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Exception in closing " + fileListWriter, e); + } + } + } + } + } + + private Path computeSourceRootPath(FileStatus sourceStatus, DistCpOptions options) throws IOException { + Path target = options.getTargetPath(); + FileSystem targetFS = target.getFileSystem(getConf()); + final boolean targetPathExists = options.getTargetPathExists(); + + boolean solitaryFile = options.getSourcePaths().size() == 1 && !sourceStatus.isDirectory(); + + if (solitaryFile) { + if (targetFS.isFile(target) || !targetPathExists) { + return sourceStatus.getPath(); + } else { + return sourceStatus.getPath().getParent(); + } + } else { + boolean specialHandling = + (options.getSourcePaths().size() == 1 && !targetPathExists) || options.shouldSyncFolder() || options.shouldOverwrite(); + + return specialHandling && sourceStatus.isDirectory() ? 
sourceStatus.getPath() : sourceStatus.getPath().getParent(); + } + } + + private Path makeQualified(Path path) throws IOException { + final FileSystem fs = path.getFileSystem(getConf()); + return path.makeQualified(fs.getUri(), fs.getWorkingDirectory()); + } + + private static boolean isDirectoryAndNotEmpty(FileSystem fileSystem, FileStatus fileStatus) throws IOException { + return fileStatus.isDirectory() && getChildren(fileSystem, fileStatus).length > 0; + } + + private static FileStatus[] getChildren(FileSystem fileSystem, FileStatus parent) throws IOException { + return fileSystem.listStatus(parent.getPath()); + } + + private void traverseNonEmptyDirectory(SequenceFile.Writer fileListWriter, FileStatus sourceStatus, Path sourcePathRoot, + DistCpOptions options) throws IOException { + FileSystem sourceFS = sourcePathRoot.getFileSystem(getConf()); + final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL); + final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR); + final boolean preserveRawXattrs = options.shouldPreserveRawXattrs(); + Stack<FileStatus> pathStack = new Stack<>(); + pathStack.push(sourceStatus); + + while (!pathStack.isEmpty()) { + for (FileStatus child : getChildren(sourceFS, pathStack.pop())) { + if (LOG.isDebugEnabled()) { + LOG.debug("Recording source-path: " + sourceStatus.getPath() + " for copy."); Review comment: should be able to do string interpolation right instead of concatenation? ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Issue Time Tracking
-------------------

Worklog Id: (was: 276164)
Time Spent: 1h 10m (was: 1h)

> Improvements for usage of DistCp
> --------------------------------
>
> Key: CRUNCH-679
> URL: https://issues.apache.org/jira/browse/CRUNCH-679
> Project: Crunch
> Issue Type: Improvement
> Components: Core
> Reporter: Andrew Olson
> Assignee: Josh Wills
> Priority: Major
> Time Spent: 1h 10m
> Remaining Estimate: 0h
>
> As a follow-up to CRUNCH-660 and CRUNCH-675, a handful of corrections and
> improvements have been identified during testing.
> * We need to preserve preferred part names, e.g. part-m-00000. Currently the
> DistCp support in Crunch does not make use of the FileTargetImpl#getDestFile
> method, and would therefore create destination file names like out0-m-00000,
> which are problematic when there are multiple map-only jobs writing to the
> same target path. This can be achieved by providing a custom CopyListing
> implementation that is capable of dynamically renaming target paths based on
> a given mapping. Unfortunately a substantial amount of code duplication from
> the original SimpleCopyListing class is currently required in order to inject
> the necessary logic for modifying the sequence file entry keys. HADOOP-16147
> has been opened to allow it to be simplified in the future.
> * The handleOutputs implementation in HFileTarget is essentially identical to
> the one in FileTargetImpl that it overrides. We can remove it and just share
> the same code.
> * It could be useful to add a property for configuring the max DistCp task
> bandwidth, as the default (100 MB/s per task) may be too high for certain
> environments.
> * The default of 1000 for max DistCp map tasks may be too high in some
> situations resulting in 503 Slow Down errors from S3 especially if there are
> multiple jobs writing into the same bucket.
Reducing to 100 should help
> prevent issues along those lines while still providing adequate parallel
> throughput.

--
This message was sent by Atlassian JIRA
(v7.6.14#76016)