[ https://issues.apache.org/jira/browse/FLINK-4228?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16240261#comment-16240261 ]
ASF GitHub Bot commented on FLINK-4228:
---------------------------------------
Github user tillrohrmann commented on a diff in the pull request:
https://github.com/apache/flink/pull/4939#discussion_r149071810
--- Diff: flink-yarn/src/test/java/org/apache/flink/yarn/YarnFileStageTest.java ---
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.yarn;
+
+import org.apache.flink.util.OperatingSystem;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.junit.AfterClass;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+/**
+ * Tests for verifying file staging during submission to YARN works.
+ */
+public class YarnFileStageTest {
+
+ @ClassRule
+ public static final TemporaryFolder CLASS_TEMP_DIR = new TemporaryFolder();
+
+ @Rule
+ public TemporaryFolder tempFolder = new TemporaryFolder();
+
+ private static MiniDFSCluster hdfsCluster;
+
+ private static Path hdfsRootPath;
+
+ private org.apache.hadoop.conf.Configuration hadoopConfig;
+
+ // ------------------------------------------------------------------------
+ // Test setup and shutdown
+ // ------------------------------------------------------------------------
+
+ @BeforeClass
+ public static void createHDFS() throws Exception {
+ Assume.assumeTrue(!OperatingSystem.isWindows());
+
+ final File tempDir = CLASS_TEMP_DIR.newFolder();
+
+ org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
+ hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());
+
+ MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
+ hdfsCluster = builder.build();
+ hdfsRootPath = new Path(hdfsCluster.getURI());
+ }
+
+ @AfterClass
+ public static void destroyHDFS() {
+ if (hdfsCluster != null) {
+ hdfsCluster.shutdown();
+ }
+ hdfsCluster = null;
+ hdfsRootPath = null;
+ }
+
+ @Before
+ public void initConfig() {
+ hadoopConfig = new org.apache.hadoop.conf.Configuration();
+ hadoopConfig.set(org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY, hdfsRootPath.toString());
+ }
+
+ /**
+ * Verifies that nested directories are properly copied with a <tt>hdfs://</tt> file
+ * system (from a <tt>file:///absolute/path</tt> source path).
+ */
+ @Test
+ public void testCopyFromLocalRecursiveWithScheme() throws Exception {
+ final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
+ final Path targetDir = targetFileSystem.getWorkingDirectory();
+
+ testCopyFromLocalRecursive(targetFileSystem, targetDir, tempFolder, true);
+ }
+
+ /**
+ * Verifies that nested directories are properly copied with a <tt>hdfs://</tt> file
+ * system (from a <tt>/absolute/path</tt> source path).
+ */
+ @Test
+ public void testCopyFromLocalRecursiveWithoutScheme() throws Exception {
+ final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
+ final Path targetDir = targetFileSystem.getWorkingDirectory();
+
+ testCopyFromLocalRecursive(targetFileSystem, targetDir, tempFolder, false);
+ }
+
+ /**
+ * Verifies that nested directories are properly copied with the given filesystem and paths.
+ *
+ * @param targetFileSystem
+ * file system of the target path
+ * @param targetDir
+ * target path (URI like <tt>hdfs://...</tt>)
+ * @param tempFolder
+ * JUnit temporary folder rule to create the source directory with
+ * @param addSchemeToLocalPath
+ * whether to add the <tt>file://</tt> scheme to the local path to copy from
+ */
+ public static void testCopyFromLocalRecursive(
+ FileSystem targetFileSystem, Path targetDir, TemporaryFolder tempFolder,
+ boolean addSchemeToLocalPath) throws Exception {
--- End diff ---
nit: line breaks inconsistent.
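For example, one consistent option would be one parameter per line (just a suggestion to illustrate the break style; as far as I know checkstyle does not enforce a particular choice here):
{code}
public static void testCopyFromLocalRecursive(
		FileSystem targetFileSystem,
		Path targetDir,
		TemporaryFolder tempFolder,
		boolean addSchemeToLocalPath) throws Exception {
{code}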
> YARN artifact upload does not work with S3AFileSystem
> -----------------------------------------------------
>
> Key: FLINK-4228
> URL: https://issues.apache.org/jira/browse/FLINK-4228
> Project: Flink
> Issue Type: Bug
> Components: State Backends, Checkpointing
> Reporter: Ufuk Celebi
> Priority: Blocker
> Fix For: 1.4.0
>
>
> The issue is now exclusive to running on YARN with s3a:// as the configured
> FileSystem. In that case, the Flink session fails while staging itself because it
> tries to copy the flink/lib directory to S3 and the S3AFileSystem does not
> support recursive copies.
> h2. Old Issue
> Using the {{RocksDBStateBackend}} with semi-async snapshots (current default)
> leads to an Exception when uploading the snapshot to S3 when using the
> {{S3AFileSystem}}.
> {code}
> AsynchronousException{com.amazonaws.AmazonClientException: Unable to calculate MD5 hash: /var/folders/_c/5tc5q5q55qjcjtqwlwvwd1m00000gn/T/flink-io-5640e9f1-3ea4-4a0f-b4d9-3ce9fbd98d8a/7c6e745df2dddc6eb70def1240779e44/StreamFlatMap_3_0/dummy_state/47daaf2a-150c-4208-aa4b-409927e9e5b7/local-chk-2886 (Is a directory)}
>     at org.apache.flink.streaming.runtime.tasks.StreamTask$AsyncCheckpointThread.run(StreamTask.java:870)
> Caused by: com.amazonaws.AmazonClientException: Unable to calculate MD5 hash: /var/folders/_c/5tc5q5q55qjcjtqwlwvwd1m00000gn/T/flink-io-5640e9f1-3ea4-4a0f-b4d9-3ce9fbd98d8a/7c6e745df2dddc6eb70def1240779e44/StreamFlatMap_3_0/dummy_state/47daaf2a-150c-4208-aa4b-409927e9e5b7/local-chk-2886 (Is a directory)
>     at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1298)
>     at com.amazonaws.services.s3.transfer.internal.UploadCallable.uploadInOneChunk(UploadCallable.java:108)
>     at com.amazonaws.services.s3.transfer.internal.UploadCallable.call(UploadCallable.java:100)
>     at com.amazonaws.services.s3.transfer.internal.UploadMonitor.upload(UploadMonitor.java:192)
>     at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:150)
>     at com.amazonaws.services.s3.transfer.internal.UploadMonitor.call(UploadMonitor.java:50)
>     at java.util.concurrent.FutureTask.run(FutureTask.java:266)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> Caused by: java.io.FileNotFoundException: /var/folders/_c/5tc5q5q55qjcjtqwlwvwd1m00000gn/T/flink-io-5640e9f1-3ea4-4a0f-b4d9-3ce9fbd98d8a/7c6e745df2dddc6eb70def1240779e44/StreamFlatMap_3_0/dummy_state/47daaf2a-150c-4208-aa4b-409927e9e5b7/local-chk-2886 (Is a directory)
>     at java.io.FileInputStream.open0(Native Method)
>     at java.io.FileInputStream.open(FileInputStream.java:195)
>     at java.io.FileInputStream.<init>(FileInputStream.java:138)
>     at com.amazonaws.services.s3.AmazonS3Client.putObject(AmazonS3Client.java:1294)
>     ... 9 more
> {code}
> Running with S3NFileSystem, the error does not occur. The problem might be
> due to {{HDFSCopyToLocal}} assuming that sub-folders are going to be created
> automatically. We might need to manually create folders and copy only actual
> files for {{S3AFileSystem}}. More investigation is required.
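A rough sketch of that direction, using only the standard Hadoop FileSystem API (the class name, paths, and structure below are illustrative, not the actual fix): walk the local directory ourselves, create each target directory explicitly, and upload plain files one at a time, so that S3AFileSystem never has to perform a recursive copy.
{code}
// Rough sketch only: stage a local directory to a remote FileSystem without
// relying on a recursive copy (which S3AFileSystem does not support).
// All names and paths here are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.File;

public final class RecursiveUploadSketch {

	public static void copyDirectory(File localDir, FileSystem targetFs, Path targetDir) throws Exception {
		// Create the target directory explicitly instead of expecting the
		// file system to create parent "folders" during a recursive copy.
		targetFs.mkdirs(targetDir);

		File[] children = localDir.listFiles();
		if (children == null) {
			return;
		}
		for (File child : children) {
			Path target = new Path(targetDir, child.getName());
			if (child.isDirectory()) {
				// Recurse into sub-directories and copy their contents file by file.
				copyDirectory(child, targetFs, target);
			} else {
				// Upload a single regular file; single-file uploads work with S3AFileSystem.
				targetFs.copyFromLocalFile(new Path(child.toURI()), target);
			}
		}
	}

	public static void main(String[] args) throws Exception {
		// Hypothetical usage: stage the local lib directory into the default file system.
		FileSystem fs = FileSystem.get(new Configuration());
		copyDirectory(new File("/tmp/flink/lib"), fs, new Path("/user/hadoop/.flink/lib"));
	}
}
{code}
The same walk could also collect the uploaded paths in order to register them as YARN LocalResources, but that part is omitted from the sketch.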
--
This message was sent by Atlassian JIRA
(v6.4.14#64029)