hadoop git commit: HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)

2018-01-17 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e54c65a32 -> 55142849d


HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential 
read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)

(cherry picked from commit 9195a6e302028ed3921d1016ac2fa5754f06ebf0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55142849
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55142849
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55142849

Branch: refs/heads/branch-3.0
Commit: 55142849db02a9191db0dd6f4e1401ff19ec242a
Parents: e54c65a
Author: Sammi Chen 
Authored: Wed Jan 17 15:55:59 2018 +0800
Committer: Sammi Chen 
Committed: Wed Jan 17 16:12:23 2018 +0800

--
 .../dev-support/findbugs-exclude.xml|   8 +
 .../fs/aliyun/oss/AliyunOSSFileReaderTask.java  | 109 ++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  31 +++-
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 149 +--
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java|  12 ++
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  13 +-
 .../apache/hadoop/fs/aliyun/oss/ReadBuffer.java |  86 +++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  49 ++
 8 files changed, 407 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55142849/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
index 40d78d0..c55f8e3 100644
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
@@ -15,4 +15,12 @@
limitations under the License.
 -->
 
+
+
+
+
+
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55142849/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
new file mode 100644
index 000..e5bfc2c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.aliyun.oss;
+
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Used by {@link AliyunOSSInputStream} as a task that is submitted
+ * to the thread pool.
+ * Each AliyunOSSFileReaderTask reads one part of the file so that
+ * we can accelerate the sequential read.
+ */
+public class AliyunOSSFileReaderTask implements Runnable {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(AliyunOSSFileReaderTask.class);
+
+  private String key;
+  private AliyunOSSFileSystemStore store;
+  private ReadBuffer readBuffer;
+  private static final int MAX_RETRIES = 3;
+  private RetryPolicy retryPolicy;
+
+  public AliyunOSSFileReaderTask(String key, AliyunOSSFileSystemStore store,
+  ReadBuffer readBuffer) {
+this.key = key;
+this.store = store;
+this.readBuffer = readBuffer;
+RetryPolicy defaultPolicy =
+RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+MAX_RETRIES, 3, TimeUnit.SECONDS);
+Map<Class<? extends Exception>, RetryPolicy> policies = new HashMap<>();
+policies.put(IOException.class, defaultPolicy);
+policies.put(IndexOutOfBoundsException.class,
+
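
The archive truncates the message here, mid-constructor. For orientation, a
sketch of how such a per-exception retry map is typically completed with
Hadoop's RetryPolicies API; the specific fallback policies below are an
assumption, not necessarily the committed code:

    // Sketch (assumed continuation): fail fast on programming errors,
    // retry transient I/O failures using the fixed-sleep default policy.
    policies.put(IndexOutOfBoundsException.class,
        RetryPolicies.TRY_ONCE_THEN_FAIL);
    policies.put(NullPointerException.class,
        RetryPolicies.TRY_ONCE_THEN_FAIL);
    this.retryPolicy = RetryPolicies.retryByException(defaultPolicy, policies);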

[1/2] hadoop git commit: HADOOP-15141 Support IAM Assumed roles in S3A. Contributed by Steve Loughran.

2018-01-17 Thread fabbri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9195a6e30 -> 268ab4e02


http://git-wip-us.apache.org/repos/asf/hadoop/blob/268ab4e0/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
index 95d44cc..4ee0fcb 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
@@ -19,9 +19,10 @@
 package org.apache.hadoop.fs.s3a;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -37,6 +38,7 @@ import java.net.URI;
 import java.net.URLEncoder;
 import java.nio.file.AccessDeniedException;
 
+import static org.apache.hadoop.fs.s3a.Constants.AWS_CREDENTIALS_PROVIDER;
 import static org.apache.hadoop.fs.s3a.S3ATestConstants.TEST_FS_S3A_NAME;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.assumeS3GuardState;
 
@@ -120,11 +122,12 @@ public class ITestS3ACredentialsInURL extends Assert {
 
   /**
* Set up some invalid credentials, verify login is rejected.
-   * @throws Throwable
*/
   @Test
   public void testInvalidCredentialsFail() throws Throwable {
 Configuration conf = new Configuration();
+// use the default credential provider chain
+conf.unset(AWS_CREDENTIALS_PROVIDER);
 String fsname = conf.getTrimmed(TEST_FS_S3A_NAME, "");
 Assume.assumeNotNull(fsname);
 assumeS3GuardState(false, conf);
@@ -132,14 +135,11 @@ public class ITestS3ACredentialsInURL extends Assert {
 URI testURI = createUriWithEmbeddedSecrets(original, "user", "//");
 
 conf.set(TEST_FS_S3A_NAME, testURI.toString());
-try {
-  fs = S3ATestUtils.createTestFileSystem(conf);
-  FileStatus status = fs.getFileStatus(new Path("/"));
-  fail("Expected an AccessDeniedException, got " + status);
-} catch (AccessDeniedException e) {
-  // expected
-}
-
+LambdaTestUtils.intercept(AccessDeniedException.class,
+() -> {
+  fs = S3ATestUtils.createTestFileSystem(conf);
+  return fs.getFileStatus(new Path("/"));
+});
   }
 
   private URI createUriWithEmbeddedSecrets(URI original,
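
The noteworthy change above is the move from a hand-rolled try/catch/fail
block to LambdaTestUtils.intercept, which evaluates a callable and returns
the caught exception when it matches the expected class, failing the test
otherwise. A minimal illustration of the idiom (the exception type and path
below are hypothetical, not from this patch):

    // intercept() returns the caught exception, so the test can make
    // follow-up assertions on its message or cause.
    FileNotFoundException e = LambdaTestUtils.intercept(
        FileNotFoundException.class,
        () -> fs.getFileStatus(new Path("/no-such-file")));
    assertTrue(e.getMessage().contains("/no-such-file"));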

http://git-wip-us.apache.org/repos/asf/hadoop/blob/268ab4e0/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
index ddf2529..7a21876 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java
@@ -186,4 +186,11 @@ public class ITestS3AMiscOperations extends 
AbstractS3ATestBase {
 fs.getFileChecksum(f, HELLO.length * 2));
   }
 
+  @Test
+  public void testS3AToStringUnitialized() throws Throwable {
+try(S3AFileSystem fs = new S3AFileSystem()) {
+  fs.toString();
+}
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/268ab4e0/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index f4e7c68..d6533bf 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -828,9 +828,24 @@ public final class S3ATestUtils {
* Skip a test if the FS isn't marked as supporting magic commits.
* @param fs filesystem
*/
-  public void assumeMagicCommitEnabled(S3AFileSystem fs) {
+  public static void assumeMagicCommitEnabled(S3AFileSystem fs) {
 assume("Magic commit option disabled on " + fs,
 fs.hasCapability(CommitConstants.STORE_CAPABILITY_MAGIC_COMMITTER));
   }
 
+  /**
+   * Probe for the configuration containing a specific credential provider.
+   * If the list is empty, there will be no match, even if the named provider
+   * is on the default list.
+   *
+   * @param conf configuration
+   * @param providerClassname provider class
+   * @return true if the configuration contains that classname.
+
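
The method body is cut off by the archive. A plausible reconstruction of the
probe described by the javadoc, assuming the provider list is read from
fs.s3a.aws.credentials.provider; the method name and body are an editorial
sketch, not the committed code:

    public static boolean authenticationContains(Configuration conf,
        String providerClassname) {
      // An empty provider list matches nothing, even if the named provider
      // would appear on the default chain.
      return conf.getTrimmedStringCollection(AWS_CREDENTIALS_PROVIDER)
          .contains(providerClassname);
    }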

[2/2] hadoop git commit: HADOOP-15141 Support IAM Assumed roles in S3A. Contributed by Steve Loughran.

2018-01-17 Thread fabbri
HADOOP-15141 Support IAM Assumed roles in S3A. Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/268ab4e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/268ab4e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/268ab4e0

Branch: refs/heads/trunk
Commit: 268ab4e0279b3e40f4a627d3dfe91e2a3523a8cc
Parents: 9195a6e
Author: Aaron Fabbri 
Authored: Wed Jan 17 00:04:09 2018 -0800
Committer: Aaron Fabbri 
Committed: Wed Jan 17 00:05:24 2018 -0800

--
 .../src/main/resources/core-default.xml |  59 ++
 .../fs/s3a/AWSCredentialProviderList.java   |  48 +-
 .../fs/s3a/AssumedRoleCredentialProvider.java   | 197 ++
 .../org/apache/hadoop/fs/s3a/Constants.java |  37 ++
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |   8 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  55 +-
 .../hadoop/fs/s3native/S3xLoginHelper.java  |   4 +-
 .../markdown/tools/hadoop-aws/assumed_roles.md  | 595 +++
 .../src/site/markdown/tools/hadoop-aws/index.md |  55 +-
 .../site/markdown/tools/hadoop-aws/testing.md   |  42 ++
 .../tools/hadoop-aws/troubleshooting_s3a.md |   5 +-
 .../s3a/ITestS3AContractDistCpAssumedRole.java  |  52 ++
 .../apache/hadoop/fs/s3a/ITestAssumeRole.java   | 324 ++
 .../hadoop/fs/s3a/ITestS3ACredentialsInURL.java |  20 +-
 .../hadoop/fs/s3a/ITestS3AMiscOperations.java   |   7 +
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  17 +-
 .../fs/s3a/TestS3AAWSCredentialsProvider.java   |   2 +
 .../s3a/s3guard/DynamoDBLocalClientFactory.java |   3 +
 18 files changed, 1471 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/268ab4e0/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 183faa5..ede1f1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -956,6 +956,65 @@
 
 
 
+<property>
+  <name>fs.s3a.assumed.role.arn</name>
+  <value/>
+  <description>
+    AWS ARN for the role to be assumed.
+    Required if the fs.s3a.aws.credentials.provider contains
+    org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.assumed.role.session.name</name>
+  <value/>
+  <description>
+    Session name for the assumed role, must be valid characters according to
+    the AWS APIs.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+    If not set, one is generated from the current Hadoop/Kerberos username.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.assumed.role.session.duration</name>
+  <value>30m</value>
+  <description>
+    Duration of assumed roles before a refresh is attempted.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.assumed.role.policy</name>
+  <value/>
+  <description>
+    JSON policy containing more restrictions to apply to the role.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.assumed.role.sts.endpoint</name>
+  <value/>
+  <description>
+    AWS Security Token Service (STS) endpoint. If unset, uses the default
+    endpoint.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.assumed.role.credentials.provider</name>
+  <value>org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider</value>
+  <description>
+    List of credential providers to authenticate with the STS endpoint and
+    retrieve short-lived role credentials.
+    Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+    If unset, uses "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider".
+  </description>
+</property>
+
 <property>
   <name>fs.s3a.connection.maximum</name>
   <value>15</value>
   <description>Controls the maximum number of simultaneous connections
     to S3.</description>
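
Taken together, a minimal client-side setup for assumed roles might look like
the following; the ARN and session name are placeholders, not values from
this commit:

    Configuration conf = new Configuration();
    // Route credential lookup through the assumed-role provider.
    conf.set("fs.s3a.aws.credentials.provider",
        "org.apache.hadoop.fs.s3a.AssumedRoleCredentialProvider");
    // Role to assume (placeholder ARN) and an optional session name; the
    // inner provider list authenticates against STS with full credentials.
    conf.set("fs.s3a.assumed.role.arn",
        "arn:aws:iam::123456789012:role/example-role");
    conf.set("fs.s3a.assumed.role.session.name", "example-session");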

http://git-wip-us.apache.org/repos/asf/hadoop/blob/268ab4e0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
index d4ec2d6..e0bee0f 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/AWSCredentialProviderList.java
@@ -26,12 +26,16 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.IOUtils;
+
 import org.slf4j.Logg

hadoop git commit: HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)

2018-01-17 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 db8345fa9 -> 082a707ba


HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential 
read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)

(cherry picked from commit 9195a6e302028ed3921d1016ac2fa5754f06ebf0)
(cherry picked from commit 55142849db02a9191db0dd6f4e1401ff19ec242a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/082a707b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/082a707b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/082a707b

Branch: refs/heads/branch-3
Commit: 082a707bae4bb97444a34c00eecd62975807388d
Parents: db8345f
Author: Sammi Chen 
Authored: Wed Jan 17 15:55:59 2018 +0800
Committer: Sammi Chen 
Committed: Wed Jan 17 16:16:03 2018 +0800

--
 .../dev-support/findbugs-exclude.xml|   8 +
 .../fs/aliyun/oss/AliyunOSSFileReaderTask.java  | 109 ++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  31 +++-
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 149 +--
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java|  12 ++
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  13 +-
 .../apache/hadoop/fs/aliyun/oss/ReadBuffer.java |  86 +++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  49 ++
 8 files changed, 407 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/082a707b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
index 40d78d0..c55f8e3 100644
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
@@ -15,4 +15,12 @@
limitations under the License.
 -->
 
+
+
+
+
+
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/082a707b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
new file mode 100644
index 000..e5bfc2c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.aliyun.oss;
+
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Used by {@link AliyunOSSInputStream} as a task that is submitted
+ * to the thread pool.
+ * Each AliyunOSSFileReaderTask reads one part of the file so that
+ * we can accelerate the sequential read.
+ */
+public class AliyunOSSFileReaderTask implements Runnable {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(AliyunOSSFileReaderTask.class);
+
+  private String key;
+  private AliyunOSSFileSystemStore store;
+  private ReadBuffer readBuffer;
+  private static final int MAX_RETRIES = 3;
+  private RetryPolicy retryPolicy;
+
+  public AliyunOSSFileReaderTask(String key, AliyunOSSFileSystemStore store,
+  ReadBuffer readBuffer) {
+this.key = key;
+this.store = store;
+this.readBuffer = readBuffer;
+RetryPolicy defaultPolicy =
+RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+MAX_RETRIES, 3, TimeUnit.SECONDS);
+Map<Class<? extends Exception>, RetryPolicy> policies = new HashMap<>();
+policies.put(IOException.class, default

hadoop git commit: HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)

2018-01-17 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8e7ce0eb4 -> 896dc7c78


HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential 
read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)

(cherry picked from commit 9195a6e302028ed3921d1016ac2fa5754f06ebf0)
(cherry picked from commit 55142849db02a9191db0dd6f4e1401ff19ec242a)
(cherry picked from commit 082a707bae4bb97444a34c00eecd62975807388d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/896dc7c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/896dc7c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/896dc7c7

Branch: refs/heads/branch-2
Commit: 896dc7c7801adaa4460fa6c19a4f452a6a6112d8
Parents: 8e7ce0e
Author: Sammi Chen 
Authored: Wed Jan 17 15:55:59 2018 +0800
Committer: Sammi Chen 
Committed: Wed Jan 17 16:36:03 2018 +0800

--
 .../dev-support/findbugs-exclude.xml|   8 +
 .../fs/aliyun/oss/AliyunOSSFileReaderTask.java  | 109 ++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  31 +++-
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 149 +--
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java|  12 ++
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  13 +-
 .../apache/hadoop/fs/aliyun/oss/ReadBuffer.java |  86 +++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  49 ++
 8 files changed, 407 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/896dc7c7/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
index 40d78d0..c55f8e3 100644
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
@@ -15,4 +15,12 @@
limitations under the License.
 -->
 
+
+
+
+
+
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/896dc7c7/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
new file mode 100644
index 000..e5bfc2c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.aliyun.oss;
+
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Used by {@link AliyunOSSInputStream} as a task that is submitted
+ * to the thread pool.
+ * Each AliyunOSSFileReaderTask reads one part of the file so that
+ * we can accelerate the sequential read.
+ */
+public class AliyunOSSFileReaderTask implements Runnable {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(AliyunOSSFileReaderTask.class);
+
+  private String key;
+  private AliyunOSSFileSystemStore store;
+  private ReadBuffer readBuffer;
+  private static final int MAX_RETRIES = 3;
+  private RetryPolicy retryPolicy;
+
+  public AliyunOSSFileReaderTask(String key, AliyunOSSFileSystemStore store,
+  ReadBuffer readBuffer) {
+this.key = key;
+this.store = store;
+this.readBuffer = readBuffer;
+RetryPolicy defaultPolicy =
+RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+MAX_RETRIES, 3, TimeUnit.SECONDS);
+Map<Class<? extends Exception>, RetryPolicy> pol

hadoop git commit: HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)

2018-01-17 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 28f69755f -> 622f6b65d


HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential 
read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)

(cherry picked from commit 9195a6e302028ed3921d1016ac2fa5754f06ebf0)
(cherry picked from commit 55142849db02a9191db0dd6f4e1401ff19ec242a)
(cherry picked from commit 082a707bae4bb97444a34c00eecd62975807388d)
(cherry picked from commit 896dc7c7801adaa4460fa6c19a4f452a6a6112d8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/622f6b65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/622f6b65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/622f6b65

Branch: refs/heads/branch-2.9
Commit: 622f6b65d684ce498a811784a229fb0386745711
Parents: 28f6975
Author: Sammi Chen 
Authored: Wed Jan 17 15:55:59 2018 +0800
Committer: Sammi Chen 
Committed: Wed Jan 17 16:37:25 2018 +0800

--
 .../dev-support/findbugs-exclude.xml|   8 +
 .../fs/aliyun/oss/AliyunOSSFileReaderTask.java  | 109 ++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  31 +++-
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 149 +--
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java|  12 ++
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  13 +-
 .../apache/hadoop/fs/aliyun/oss/ReadBuffer.java |  86 +++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  49 ++
 8 files changed, 407 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/622f6b65/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
index 40d78d0..c55f8e3 100644
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
@@ -15,4 +15,12 @@
limitations under the License.
 -->
 
+
+
+
+
+
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/622f6b65/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
new file mode 100644
index 000..e5bfc2c
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.aliyun.oss;
+
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Used by {@link AliyunOSSInputStream} as a task that is submitted
+ * to the thread pool.
+ * Each AliyunOSSFileReaderTask reads one part of the file so that
+ * we can accelerate the sequential read.
+ */
+public class AliyunOSSFileReaderTask implements Runnable {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(AliyunOSSFileReaderTask.class);
+
+  private String key;
+  private AliyunOSSFileSystemStore store;
+  private ReadBuffer readBuffer;
+  private static final int MAX_RETRIES = 3;
+  private RetryPolicy retryPolicy;
+
+  public AliyunOSSFileReaderTask(String key, AliyunOSSFileSystemStore store,
+  ReadBuffer readBuffer) {
+this.key = key;
+this.store = store;
+this.readBuffer = readBuffer;
+RetryPolicy defaultPolicy =
+RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+

[2/4] hadoop git commit: HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. Contributed by Vinayakumar B.

2018-01-17 Thread vinayakumarb
HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. 
Contributed by Vinayakumar B.

(cherry picked from commit 09efdfe9e13c9695867ce4034aa6ec970c2032f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3eeea0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3eeea0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3eeea0b

Branch: refs/heads/branch-2
Commit: b3eeea0b6e27f9e74c35d2a930b541617fc7c8f2
Parents: 896dc7c
Author: Vinayakumar B 
Authored: Wed Jan 17 14:16:48 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Jan 17 14:22:29 2018 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 ++
 .../hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java | 5 -
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3eeea0b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 164061b..cefa6f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -557,6 +557,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";
   public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 50075;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_HTTP_DEFAULT_PORT;
+  public static final String  DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT =
+  "dfs.datanode.http.internal-proxy.port";
   public static final String  DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3eeea0b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index ad830f0..a3b3b65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -74,6 +74,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT;
 
 public class DatanodeHttpServer implements Closeable {
   private final HttpServer2 infoServer;
@@ -99,12 +100,14 @@ public class DatanodeHttpServer implements Closeable {
 
 Configuration confForInfoServer = new Configuration(conf);
 confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
+int proxyPort =
+confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("datanode")
 .setConf(confForInfoServer)
 .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
 .hostName(getHostnameForSpnegoPrincipal(confForInfoServer))
-.addEndpoint(URI.create("http://localhost:0";))
+.addEndpoint(URI.create("http://localhost:"; + proxyPort))
 .setFindPort(true);
 
 final boolean xFrameEnabled = conf.getBoolean(
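
The new key is read with a default of 0, and setFindPort(true) is retained,
so the old behaviour (bind to any free port) is unchanged unless an operator
pins the port explicitly. A sketch of pinning it; the port number is a
placeholder, not a recommended value:

    Configuration conf = new Configuration();
    // 0 (the default) keeps the old "pick any free port" behaviour; a
    // non-zero value pins the internal endpoint behind the Netty proxy.
    conf.setInt("dfs.datanode.http.internal-proxy.port", 50077);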

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3eeea0b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 4ce9e76..1501a03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-

[1/4] hadoop git commit: HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. Contributed by Vinayakumar B.

2018-01-17 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 896dc7c78 -> b3eeea0b6
  refs/heads/branch-2.8 5d5fc4ec2 -> 34f08f740
  refs/heads/branch-2.9 622f6b65d -> 62b82d7f4
  refs/heads/trunk 268ab4e02 -> 09efdfe9e


HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. 
Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09efdfe9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09efdfe9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09efdfe9

Branch: refs/heads/trunk
Commit: 09efdfe9e13c9695867ce4034aa6ec970c2032f1
Parents: 268ab4e
Author: Vinayakumar B 
Authored: Wed Jan 17 14:16:48 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Jan 17 14:16:48 2018 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 ++
 .../hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java | 5 -
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09efdfe9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2825cc9..f53badc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -650,6 +650,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";
   public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 9864;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_HTTP_DEFAULT_PORT;
+  public static final String  DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT =
+  "dfs.datanode.http.internal-proxy.port";
   public static final String  DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09efdfe9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index b51b1fc..2e46b28 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -72,6 +72,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT;
 
 public class DatanodeHttpServer implements Closeable {
   private final HttpServer2 infoServer;
@@ -97,12 +98,14 @@ public class DatanodeHttpServer implements Closeable {
 
 Configuration confForInfoServer = new Configuration(conf);
 confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+int proxyPort =
+confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("datanode")
 .setConf(confForInfoServer)
 .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
 .hostName(getHostnameForSpnegoPrincipal(confForInfoServer))
-.addEndpoint(URI.create("http://localhost:0";))
+.addEndpoint(URI.create("http://localhost:"; + proxyPort))
 .setFindPort(true);
 
 final boolean xFrameEnabled = conf.getBoolean(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09efdfe9/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index cd365be..7a

[3/4] hadoop git commit: HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. Contributed by Vinayakumar B.

2018-01-17 Thread vinayakumarb
HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. 
Contributed by Vinayakumar B.

(cherry picked from commit 09efdfe9e13c9695867ce4034aa6ec970c2032f1)
(cherry picked from commit b3eeea0b6e27f9e74c35d2a930b541617fc7c8f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62b82d7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62b82d7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62b82d7f

Branch: refs/heads/branch-2.9
Commit: 62b82d7f4116cb831f1071cacb01604b8f67e7d1
Parents: 622f6b6
Author: Vinayakumar B 
Authored: Wed Jan 17 14:16:48 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Jan 17 14:23:33 2018 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 ++
 .../hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java | 5 -
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62b82d7f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 519e39b..daf9907 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -557,6 +557,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";
   public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 50075;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_HTTP_DEFAULT_PORT;
+  public static final String  DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT =
+  "dfs.datanode.http.internal-proxy.port";
   public static final String  DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62b82d7f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index ad830f0..a3b3b65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -74,6 +74,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT;
 
 public class DatanodeHttpServer implements Closeable {
   private final HttpServer2 infoServer;
@@ -99,12 +100,14 @@ public class DatanodeHttpServer implements Closeable {
 
 Configuration confForInfoServer = new Configuration(conf);
 confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
+int proxyPort =
+confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("datanode")
 .setConf(confForInfoServer)
 .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
 .hostName(getHostnameForSpnegoPrincipal(confForInfoServer))
-.addEndpoint(URI.create("http://localhost:0";))
+.addEndpoint(URI.create("http://localhost:"; + proxyPort))
 .setFindPort(true);
 
 final boolean xFrameEnabled = conf.getBoolean(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62b82d7f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c535368..bef80c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-def

[4/4] hadoop git commit: HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. Contributed by Vinayakumar B.

2018-01-17 Thread vinayakumarb
HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. 
Contributed by Vinayakumar B.

(cherry picked from commit 09efdfe9e13c9695867ce4034aa6ec970c2032f1)
(cherry picked from commit b3eeea0b6e27f9e74c35d2a930b541617fc7c8f2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34f08f74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34f08f74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34f08f74

Branch: refs/heads/branch-2.8
Commit: 34f08f740f7d03008f221fa601df27b605154349
Parents: 5d5fc4e
Author: Vinayakumar B 
Authored: Wed Jan 17 14:16:48 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Jan 17 14:24:12 2018 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 ++
 .../hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java | 5 -
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34f08f74/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2139921..990767a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -514,6 +514,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";
   public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 50075;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_HTTP_DEFAULT_PORT;
+  public static final String  DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT =
+  "dfs.datanode.http.internal-proxy.port";
   public static final String  DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34f08f74/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index ad830f0..a3b3b65 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -74,6 +74,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT;
 
 public class DatanodeHttpServer implements Closeable {
   private final HttpServer2 infoServer;
@@ -99,12 +100,14 @@ public class DatanodeHttpServer implements Closeable {
 
 Configuration confForInfoServer = new Configuration(conf);
 confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
+int proxyPort =
+confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("datanode")
 .setConf(confForInfoServer)
 .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
 .hostName(getHostnameForSpnegoPrincipal(confForInfoServer))
-.addEndpoint(URI.create("http://localhost:0";))
+.addEndpoint(URI.create("http://localhost:"; + proxyPort))
 .setFindPort(true);
 
 final boolean xFrameEnabled = conf.getBoolean(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34f08f74/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9b1d090..6015ac2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-def

hadoop git commit: HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. Contributed by Vinayakumar B.

2018-01-17 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 082a707ba -> 0802d8afa


HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. 
Contributed by Vinayakumar B.

(cherry picked from commit 09efdfe9e13c9695867ce4034aa6ec970c2032f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0802d8af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0802d8af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0802d8af

Branch: refs/heads/branch-3
Commit: 0802d8afa355d9a0683fdb2e9c4963e8fea8644f
Parents: 082a707
Author: Vinayakumar B 
Authored: Wed Jan 17 14:16:48 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Jan 17 14:28:09 2018 +0530

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 ++
 .../hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java | 5 -
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0802d8af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1058b4a..9c51f25 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -617,6 +617,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";
   public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 9864;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_HTTP_DEFAULT_PORT;
+  public static final String  DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT =
+  "dfs.datanode.http.internal-proxy.port";
   public static final String  DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0802d8af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index b51b1fc..2e46b28 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -72,6 +72,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT;
 
 public class DatanodeHttpServer implements Closeable {
   private final HttpServer2 infoServer;
@@ -97,12 +98,14 @@ public class DatanodeHttpServer implements Closeable {
 
 Configuration confForInfoServer = new Configuration(conf);
 confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+int proxyPort =
+confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("datanode")
 .setConf(confForInfoServer)
 .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
 .hostName(getHostnameForSpnegoPrincipal(confForInfoServer))
-.addEndpoint(URI.create("http://localhost:0";))
+.addEndpoint(URI.create("http://localhost:"; + proxyPort))
 .setFindPort(true);
 
 final boolean xFrameEnabled = conf.getBoolean(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0802d8af/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 190a06d..6be11b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/res

[Hadoop Wiki] Update of "Books" by Packt Publishing

2018-01-17 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "Books" page has been changed by Packt Publishing:
https://wiki.apache.org/hadoop/Books?action=diff&rev1=43&rev2=44

  
  
  == Hadoop Videos ==
+ === Learn by Example : HBase - The Hadoop Database (Video) ===
+ 
+ '''Name:'''  
[[https://www.packtpub.com/application-development/learn-example-hbase-hadoop-database-video|Learn
 by Example : HBase - The Hadoop Database (Video)]]
+ 
+ '''Author:''' Loonycorn
+ 
+ '''Publisher:''' Packt
+ 
+ '''Date of Publishing:''' December  2017
+ 
+ 25 solved examples to get you up to speed with HBase
+ 
  === The Ultimate Hands-on Hadoop (Video) ===
  
  '''Name:'''  
[[https://www.packtpub.com/big-data-and-business-intelligence/ultimate-hands-hadoop-video
 | The Ultimate Hands-on Hadoop (Video)]]




hadoop git commit: MAPREDUCE-7029. FileOutputCommitter is slow on filesystems lacking recursive delete. Contributed by Karthik Palaniappan

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 09efdfe9e -> 6e42d0582


MAPREDUCE-7029. FileOutputCommitter is slow on filesystems lacking recursive 
delete. Contributed by Karthik Palaniappan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e42d058
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e42d058
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e42d058

Branch: refs/heads/trunk
Commit: 6e42d058292d9656e9ebc9a47be13280e3c919ea
Parents: 09efdfe
Author: Jason Lowe 
Authored: Wed Jan 17 08:14:11 2018 -0600
Committer: Jason Lowe 
Committed: Wed Jan 17 08:14:11 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 22 +++
 .../src/main/resources/mapred-default.xml   | 11 ++
 .../lib/output/TestFileOutputCommitter.java | 39 ++--
 3 files changed, 68 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e42d058/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index 86af2cf..cbae575 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -87,6 +87,17 @@ public class FileOutputCommitter extends PathOutputCommitter 
{
   // default value to be 1 to keep consistent with previous behavior
   public static final int FILEOUTPUTCOMMITTER_FAILURE_ATTEMPTS_DEFAULT = 1;
 
+  // Whether tasks should delete their task temporary directories. This is
+  // purely an optimization for filesystems without O(1) recursive delete, as
+  // commitJob will recursively delete the entire job temporary directory.
+  // HDFS has O(1) recursive delete, so this parameter is left false by 
default.
+  // Users of object stores, for example, may want to set this to true. Note:
+  // this is only used if mapreduce.fileoutputcommitter.algorithm.version=2
+  public static final String FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED =
+  "mapreduce.fileoutputcommitter.task.cleanup.enabled";
+  public static final boolean
+  FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED_DEFAULT = false;
+
   private Path outputPath = null;
   private Path workPath = null;
   private final int algorithmVersion;
@@ -586,6 +597,17 @@ public class FileOutputCommitter extends 
PathOutputCommitter {
   mergePaths(fs, taskAttemptDirStatus, outputPath);
   LOG.info("Saved output of task '" + attemptId + "' to " +
   outputPath);
+
+  if (context.getConfiguration().getBoolean(
+  FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED,
+  FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED_DEFAULT)) {
+LOG.debug(String.format(
+"Deleting the temporary directory of '%s': '%s'",
+attemptId, taskAttemptPath));
+if(!fs.delete(taskAttemptPath, true)) {
+  LOG.warn("Could not delete " + taskAttemptPath);
+}
+  }
 }
   } else {
 LOG.warn("No Output found for " + attemptId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e42d058/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 1e432ce..62f3dfa 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1495,6 +1495,17 @@
 
 
 
+  mapreduce.fileoutputcommitter.task.cleanup.enabled
+  false
+  Whether tasks should delete their task temporary directories. 
This is purely an
+optimization for filesystems without O(1) recursive delete, as commitJob 
will recursively delete
+the entire job temporary directory. HDFS h

hadoop git commit: Revert "HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)"

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b3eeea0b6 -> fd09f7215


Revert "HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve 
sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu 
Wu)"

This reverts commit 896dc7c7801adaa4460fa6c19a4f452a6a6112d8.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd09f721
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd09f721
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd09f721

Branch: refs/heads/branch-2
Commit: fd09f721510e6f77b48fd1ba7e511c6ab03a06ee
Parents: b3eeea0
Author: Jason Lowe 
Authored: Wed Jan 17 09:47:43 2018 -0600
Committer: Jason Lowe 
Committed: Wed Jan 17 09:47:43 2018 -0600

--
 .../dev-support/findbugs-exclude.xml|   8 -
 .../fs/aliyun/oss/AliyunOSSFileReaderTask.java  | 109 --
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  31 +---
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 149 ++-
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java|  12 --
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  13 +-
 .../apache/hadoop/fs/aliyun/oss/ReadBuffer.java |  86 ---
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  49 --
 8 files changed, 50 insertions(+), 407 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd09f721/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
index c55f8e3..40d78d0 100644
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
@@ -15,12 +15,4 @@
limitations under the License.
 -->
 
-
-
-
-
-
-
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd09f721/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
deleted file mode 100644
index e5bfc2c..000
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Used by {@link AliyunOSSInputStream} as an task that submitted
- * to the thread pool.
- * Each AliyunOSSFileReaderTask reads one part of the file so that
- * we can accelerate the sequential read.
- */
-public class AliyunOSSFileReaderTask implements Runnable {
-  public static final Logger LOG =
-  LoggerFactory.getLogger(AliyunOSSFileReaderTask.class);
-
-  private String key;
-  private AliyunOSSFileSystemStore store;
-  private ReadBuffer readBuffer;
-  private static final int MAX_RETRIES = 3;
-  private RetryPolicy retryPolicy;
-
-  public AliyunOSSFileReaderTask(String key, AliyunOSSFileSystemStore store,
-  ReadBuffer readBuffer) {
-this.key = key;
-this.store = store;
-this.readBuffer = readBuffer;
-RetryPolicy defaultPolicy =
-RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-MAX_RETRIES, 3, TimeUnit.SECONDS);
-Map&lt;Class&lt;? extends Exception&gt;, RetryPolicy&gt; policies = new HashMap&lt;&gt;();
-policies.put(IOException.class, defaultPolicy);
-policies.put(IndexOutOfBoundsException.class,
- 
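
The reverted reader task builds its retry policy from Hadoop's generic RetryPolicies machinery. A self-contained sketch of that construction follows; the mapping of IndexOutOfBoundsException to TRY_ONCE_THEN_FAIL and the final retryByException() call are plausible completions of the truncated hunk above, not a verbatim quote.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RetryPolicySketch {
  private static final int MAX_RETRIES = 3;

  static RetryPolicy build() {
    // Retry up to three times with a fixed three-second sleep...
    RetryPolicy defaultPolicy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            MAX_RETRIES, 3, TimeUnit.SECONDS);
    // ...but only for the exception classes registered below.
    Map&lt;Class&lt;? extends Exception&gt;, RetryPolicy&gt; policies = new HashMap&lt;&gt;();
    policies.put(IOException.class, defaultPolicy);
    policies.put(IndexOutOfBoundsException.class,
        RetryPolicies.TRY_ONCE_THEN_FAIL);  // assumed mapping
    return RetryPolicies.retryByException(defaultPolicy, policies);
  }
}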

hadoop git commit: Revert "HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)"

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 55142849d -> b1cc14e9c


Revert "HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve 
sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu 
Wu)"

This reverts commit 55142849db02a9191db0dd6f4e1401ff19ec242a.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1cc14e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1cc14e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1cc14e9

Branch: refs/heads/branch-3.0
Commit: b1cc14e9cbfacdab61adb4f35cc66825e0ba2291
Parents: 5514284
Author: Jason Lowe 
Authored: Wed Jan 17 09:53:24 2018 -0600
Committer: Jason Lowe 
Committed: Wed Jan 17 09:53:24 2018 -0600

--
 .../dev-support/findbugs-exclude.xml|   8 -
 .../fs/aliyun/oss/AliyunOSSFileReaderTask.java  | 109 --
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  31 +---
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 149 ++-
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java|  12 --
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  13 +-
 .../apache/hadoop/fs/aliyun/oss/ReadBuffer.java |  86 ---
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  49 --
 8 files changed, 50 insertions(+), 407 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1cc14e9/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
index c55f8e3..40d78d0 100644
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
@@ -15,12 +15,4 @@
limitations under the License.
 -->
 
-
-
-
-
-
-
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1cc14e9/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
deleted file mode 100644
index e5bfc2c..000
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Used by {@link AliyunOSSInputStream} as an task that submitted
- * to the thread pool.
- * Each AliyunOSSFileReaderTask reads one part of the file so that
- * we can accelerate the sequential read.
- */
-public class AliyunOSSFileReaderTask implements Runnable {
-  public static final Logger LOG =
-  LoggerFactory.getLogger(AliyunOSSFileReaderTask.class);
-
-  private String key;
-  private AliyunOSSFileSystemStore store;
-  private ReadBuffer readBuffer;
-  private static final int MAX_RETRIES = 3;
-  private RetryPolicy retryPolicy;
-
-  public AliyunOSSFileReaderTask(String key, AliyunOSSFileSystemStore store,
-  ReadBuffer readBuffer) {
-this.key = key;
-this.store = store;
-this.readBuffer = readBuffer;
-RetryPolicy defaultPolicy =
-RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-MAX_RETRIES, 3, TimeUnit.SECONDS);
-Map&lt;Class&lt;? extends Exception&gt;, RetryPolicy&gt; policies = new HashMap&lt;&gt;();
-policies.put(IOException.class, defaultPolicy);
-policies.put(IndexOutOfBoundsException.class

hadoop git commit: Revert "HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)"

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 62b82d7f4 -> 98835e14a


Revert "HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve 
sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu 
Wu)"

This reverts commit 622f6b65d684ce498a811784a229fb0386745711.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98835e14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98835e14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98835e14

Branch: refs/heads/branch-2.9
Commit: 98835e14ae7595814ad3b556370057c2c256471e
Parents: 62b82d7
Author: Jason Lowe 
Authored: Wed Jan 17 09:59:31 2018 -0600
Committer: Jason Lowe 
Committed: Wed Jan 17 09:59:31 2018 -0600

--
 .../dev-support/findbugs-exclude.xml|   8 -
 .../fs/aliyun/oss/AliyunOSSFileReaderTask.java  | 109 --
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  31 +---
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 149 ++-
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java|  12 --
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  13 +-
 .../apache/hadoop/fs/aliyun/oss/ReadBuffer.java |  86 ---
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  49 --
 8 files changed, 50 insertions(+), 407 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98835e14/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
index c55f8e3..40d78d0 100644
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
@@ -15,12 +15,4 @@
limitations under the License.
 -->
 
-
-
-
-
-
-
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98835e14/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
deleted file mode 100644
index e5bfc2c..000
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Used by {@link AliyunOSSInputStream} as an task that submitted
- * to the thread pool.
- * Each AliyunOSSFileReaderTask reads one part of the file so that
- * we can accelerate the sequential read.
- */
-public class AliyunOSSFileReaderTask implements Runnable {
-  public static final Logger LOG =
-  LoggerFactory.getLogger(AliyunOSSFileReaderTask.class);
-
-  private String key;
-  private AliyunOSSFileSystemStore store;
-  private ReadBuffer readBuffer;
-  private static final int MAX_RETRIES = 3;
-  private RetryPolicy retryPolicy;
-
-  public AliyunOSSFileReaderTask(String key, AliyunOSSFileSystemStore store,
-  ReadBuffer readBuffer) {
-this.key = key;
-this.store = store;
-this.readBuffer = readBuffer;
-RetryPolicy defaultPolicy =
-RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-MAX_RETRIES, 3, TimeUnit.SECONDS);
-Map&lt;Class&lt;? extends Exception&gt;, RetryPolicy&gt; policies = new HashMap&lt;&gt;();
-policies.put(IOException.class, defaultPolicy);
-policies.put(IndexOutOfBoundsException.class

hadoop git commit: Revert "HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu Wu)"

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 0802d8afa -> 702d280c8


Revert "HADOOP-15027. AliyunOSS: Support multi-thread pre-read to improve 
sequential read from Hadoop to Aliyun OSS performance. (Contributed by Jinhu 
Wu)"

This reverts commit 082a707bae4bb97444a34c00eecd62975807388d.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/702d280c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/702d280c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/702d280c

Branch: refs/heads/branch-3
Commit: 702d280c853fa67c9fcb10959b47cc19f03a98a7
Parents: 0802d8a
Author: Jason Lowe 
Authored: Wed Jan 17 10:11:24 2018 -0600
Committer: Jason Lowe 
Committed: Wed Jan 17 10:11:24 2018 -0600

--
 .../dev-support/findbugs-exclude.xml|   8 -
 .../fs/aliyun/oss/AliyunOSSFileReaderTask.java  | 109 --
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  31 +---
 .../fs/aliyun/oss/AliyunOSSInputStream.java | 149 ++-
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java|  12 --
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  13 +-
 .../apache/hadoop/fs/aliyun/oss/ReadBuffer.java |  86 ---
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  49 --
 8 files changed, 50 insertions(+), 407 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/702d280c/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml 
b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
index c55f8e3..40d78d0 100644
--- a/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
+++ b/hadoop-tools/hadoop-aliyun/dev-support/findbugs-exclude.xml
@@ -15,12 +15,4 @@
limitations under the License.
 -->
 
-
-
-
-
-
-
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/702d280c/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
deleted file mode 100644
index e5bfc2c..000
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileReaderTask.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.aliyun.oss;
-
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Used by {@link AliyunOSSInputStream} as an task that submitted
- * to the thread pool.
- * Each AliyunOSSFileReaderTask reads one part of the file so that
- * we can accelerate the sequential read.
- */
-public class AliyunOSSFileReaderTask implements Runnable {
-  public static final Logger LOG =
-  LoggerFactory.getLogger(AliyunOSSFileReaderTask.class);
-
-  private String key;
-  private AliyunOSSFileSystemStore store;
-  private ReadBuffer readBuffer;
-  private static final int MAX_RETRIES = 3;
-  private RetryPolicy retryPolicy;
-
-  public AliyunOSSFileReaderTask(String key, AliyunOSSFileSystemStore store,
-  ReadBuffer readBuffer) {
-this.key = key;
-this.store = store;
-this.readBuffer = readBuffer;
-RetryPolicy defaultPolicy =
-RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-MAX_RETRIES, 3, TimeUnit.SECONDS);
-Map&lt;Class&lt;? extends Exception&gt;, RetryPolicy&gt; policies = new HashMap&lt;&gt;();
-policies.put(IOException.class, defaultPolicy);
-policies.put(IndexOutOfBoundsException.class,
- 

hadoop git commit: MAPREDUCE-7029. FileOutputCommitter is slow on filesystems lacking recursive delete. Contributed by Karthik Palaniappan

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fd09f7215 -> c228a7c70


MAPREDUCE-7029. FileOutputCommitter is slow on filesystems lacking recursive 
delete. Contributed by Karthik Palaniappan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c228a7c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c228a7c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c228a7c7

Branch: refs/heads/branch-2
Commit: c228a7c707105025978ace18b6d42bc6b75c94ca
Parents: fd09f72
Author: Jason Lowe 
Authored: Wed Jan 17 10:32:06 2018 -0600
Committer: Jason Lowe 
Committed: Wed Jan 17 10:32:06 2018 -0600

--
 .../lib/output/FileOutputCommitter.java | 22 +++
 .../src/main/resources/mapred-default.xml   | 11 ++
 .../lib/output/TestFileOutputCommitter.java | 40 +---
 3 files changed, 68 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c228a7c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index 2291197..45cfca3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -85,6 +85,17 @@ public class FileOutputCommitter extends OutputCommitter {
   // default value to be 1 to keep consistent with previous behavior
   public static final int FILEOUTPUTCOMMITTER_FAILURE_ATTEMPTS_DEFAULT = 1;
 
+  // Whether tasks should delete their task temporary directories. This is
+  // purely an optimization for filesystems without O(1) recursive delete, as
+  // commitJob will recursively delete the entire job temporary directory.
+  // HDFS has O(1) recursive delete, so this parameter is left false by 
default.
+  // Users of object stores, for example, may want to set this to true. Note:
+  // this is only used if mapreduce.fileoutputcommitter.algorithm.version=2
+  public static final String FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED =
+  "mapreduce.fileoutputcommitter.task.cleanup.enabled";
+  public static final boolean
+  FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED_DEFAULT = false;
+
   private Path outputPath = null;
   private Path workPath = null;
   private final int algorithmVersion;
@@ -586,6 +597,17 @@ public class FileOutputCommitter extends OutputCommitter {
   mergePaths(fs, taskAttemptDirStatus, outputPath);
   LOG.info("Saved output of task '" + attemptId + "' to " +
   outputPath);
+
+  if (context.getConfiguration().getBoolean(
+  FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED,
+  FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED_DEFAULT)) {
+LOG.debug(String.format(
+"Deleting the temporary directory of '%s': '%s'",
+attemptId, taskAttemptPath));
+if(!fs.delete(taskAttemptPath, true)) {
+  LOG.warn("Could not delete " + taskAttemptPath);
+}
+  }
 }
   } else {
 LOG.warn("No Output found for " + attemptId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c228a7c7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index b182114..c0f287b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1515,6 +1515,17 @@
 
 
 
+  mapreduce.fileoutputcommitter.task.cleanup.enabled
+  false
+  Whether tasks should delete their task temporary directories. 
This is purely an
+optimization for filesystems without O(1) recursive delete, as commitJob 
will recursively delete
+the entire job temporary directory. HDFS has O

hadoop git commit: HDFS-11847. Enhance dfsadmin listOpenFiles command to list files blocking datanode decommissioning.

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 b1cc14e9c -> 710555e15


HDFS-11847. Enhance dfsadmin listOpenFiles command to list files blocking 
datanode decommissioning.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/710555e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/710555e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/710555e1

Branch: refs/heads/branch-3.0
Commit: 710555e1503c9690852fb071ace53c06c3e793f1
Parents: b1cc14e
Author: Manoj Govindassamy 
Authored: Tue Jan 2 14:59:36 2018 -0800
Committer: Jason Lowe 
Committed: Wed Jan 17 12:44:09 2018 -0600

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  16 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |   8 +
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|   7 +
 .../hadoop/hdfs/protocol/ClientProtocol.java|  16 ++
 .../hadoop/hdfs/protocol/OpenFilesIterator.java |  36 +++-
 .../ClientNamenodeProtocolTranslatorPB.java |  18 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  26 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |   7 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |   7 +-
 .../server/blockmanagement/BlockManager.java|   2 +-
 .../blockmanagement/DatanodeAdminManager.java   |  25 ++-
 .../blockmanagement/DatanodeDescriptor.java |  24 ++-
 .../federation/router/RouterRpcServer.java  |  10 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  49 -
 .../hdfs/server/namenode/NameNodeRpcServer.java |  10 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  36 +++-
 .../src/site/markdown/HDFSCommands.md   |   2 +-
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java |  18 +-
 .../apache/hadoop/hdfs/TestDecommission.java| 177 +++
 .../org/apache/hadoop/hdfs/TestHdfsAdmin.java   |   4 +-
 .../blockmanagement/BlockManagerTestUtil.java   |  12 +-
 .../hdfs/server/namenode/TestLeaseManager.java  |  48 ++---
 .../hdfs/server/namenode/TestListOpenFiles.java |  27 ++-
 23 files changed, 522 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/710555e1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index ff61384..83c3b94 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -131,6 +131,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
+import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.protocol.ReencryptionStatusIterator;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -3026,8 +3027,21 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
*
* @throws IOException
*/
+  @Deprecated
   public RemoteIterator&lt;OpenFileEntry&gt; listOpenFiles() throws IOException {
 checkOpen();
-return new OpenFilesIterator(namenode, tracer);
+return listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+  }
+
+  /**
+   * Get a remote iterator to the open files list by type, managed by NameNode.
+   *
+   * @param openFilesTypes
+   * @throws IOException
+   */
+  public RemoteIterator&lt;OpenFileEntry&gt; listOpenFiles(
+  EnumSet&lt;OpenFilesType&gt; openFilesTypes) throws IOException {
+checkOpen();
+return new OpenFilesIterator(namenode, tracer, openFilesTypes);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/710555e1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index cecd9d1..54b428e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -85,6 +85,7 @@ import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hado
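
A hedged usage sketch of the typed listing added here, via the HdfsAdmin entry point from the diffstat; the NameNode URI is a placeholder.

import java.net.URI;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

public class ListBlockingOpenFiles {
  public static void main(String[] args) throws Exception {
    HdfsAdmin admin =
        new HdfsAdmin(URI.create("hdfs://nn1:8020"), new Configuration());
    // List only the open files that are holding up decommissioning.
    RemoteIterator&lt;OpenFileEntry&gt; files =
        admin.listOpenFiles(EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION));
    while (files.hasNext()) {
      OpenFileEntry entry = files.next();
      System.out.println(entry.getFilePath()
          + " (client: " + entry.getClientName() + ")");
    }
  }
}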

hadoop git commit: HDFS-11848. Enhance dfsadmin listOpenFiles command to list files under a given path. Contributed by Yiqun Lin.

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 710555e15 -> cb07172e2


HDFS-11848. Enhance dfsadmin listOpenFiles command to list files under a given 
path. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb07172e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb07172e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb07172e

Branch: refs/heads/branch-3.0
Commit: cb07172e2179e95e28cc23b75c62f64193ec5786
Parents: 710555e
Author: Yiqun Lin 
Authored: Sat Jan 6 14:31:08 2018 +0800
Committer: Jason Lowe 
Committed: Wed Jan 17 12:50:06 2018 -0600

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 36 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  4 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|  5 +-
 .../hadoop/hdfs/protocol/OpenFilesIterator.java | 10 ++-
 .../ClientNamenodeProtocolTranslatorPB.java |  8 ++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  2 +-
 .../federation/router/RouterRpcServer.java  |  6 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 27 +---
 .../hdfs/server/namenode/LeaseManager.java  | 25 +--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 ++-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 23 +--
 .../src/site/markdown/HDFSCommands.md   |  4 +-
 .../apache/hadoop/hdfs/TestDecommission.java| 38 ++-
 .../org/apache/hadoop/hdfs/TestHdfsAdmin.java   |  4 +-
 .../hdfs/server/namenode/TestLeaseManager.java  |  8 ++-
 .../hdfs/server/namenode/TestListOpenFiles.java | 20 --
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 69 +++-
 19 files changed, 248 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb07172e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 83c3b94..c20e2a0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3030,11 +3030,26 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   @Deprecated
   public RemoteIterator&lt;OpenFileEntry&gt; listOpenFiles() throws IOException {
 checkOpen();
-return listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
+return listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+OpenFilesIterator.FILTER_PATH_DEFAULT);
   }
 
   /**
-   * Get a remote iterator to the open files list by type, managed by NameNode.
+   * Get a remote iterator to the open files list by path,
+   * managed by NameNode.
+   *
+   * @param path
+   * @throws IOException
+   */
+  public RemoteIterator&lt;OpenFileEntry&gt; listOpenFiles(String path)
+  throws IOException {
+checkOpen();
+return listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), path);
+  }
+
+  /**
+   * Get a remote iterator to the open files list by type,
+   * managed by NameNode.
*
* @param openFilesTypes
* @throws IOException
@@ -3042,6 +3057,21 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   public RemoteIterator&lt;OpenFileEntry&gt; listOpenFiles(
   EnumSet&lt;OpenFilesType&gt; openFilesTypes) throws IOException {
 checkOpen();
-return new OpenFilesIterator(namenode, tracer, openFilesTypes);
+return listOpenFiles(openFilesTypes,
+OpenFilesIterator.FILTER_PATH_DEFAULT);
+  }
+
+  /**
+   * Get a remote iterator to the open files list by type and path,
+   * managed by NameNode.
+   *
+   * @param openFilesTypes
+   * @param path
+   * @throws IOException
+   */
+  public RemoteIterator&lt;OpenFileEntry&gt; listOpenFiles(
+  EnumSet&lt;OpenFilesType&gt; openFilesTypes, String path) throws IOException {
+checkOpen();
+return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
 }
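
A short sketch of the path-scoped overload this change introduces, assuming an already-open DFSClient; the path is illustrative.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

public class ListOpenFilesUnderPath {
  // Prints open files confined to one subtree, e.g. "/user/hive".
  static void printOpenFiles(DFSClient client, String path) throws IOException {
    RemoteIterator&lt;OpenFileEntry&gt; it = client.listOpenFiles(
        EnumSet.of(OpenFilesType.ALL_OPEN_FILES), path);
    while (it.hasNext()) {
      System.out.println(it.next().getFilePath());
    }
  }
}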

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb07172e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 54b428e..369a5bd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hado

hadoop git commit: HDFS-12919. RBF: Support erasure coding methods in RouterRpcServer. Contributed by Inigo Goiri.

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 cb07172e2 -> 49e285ded


HDFS-12919. RBF: Support erasure coding methods in RouterRpcServer. Contributed 
by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49e285de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49e285de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49e285de

Branch: refs/heads/branch-3.0
Commit: 49e285ded511088b627f87545d9db1cc17636960
Parents: cb07172
Author: Inigo Goiri 
Authored: Mon Jan 15 12:21:24 2018 -0800
Committer: Jason Lowe 
Committed: Wed Jan 17 12:57:14 2018 -0600

--
 .../AddErasureCodingPolicyResponse.java |  24 ++
 .../server/federation/router/ErasureCoding.java | 198 +++
 .../federation/router/RouterRpcClient.java  |  65 -
 .../federation/router/RouterRpcServer.java  | 247 ++-
 .../server/federation/RouterDFSCluster.java |  12 +-
 .../server/federation/router/TestRouterRpc.java | 191 +-
 6 files changed, 603 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49e285de/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
index 2e8d081..dc77a47 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddErasureCodingPolicyResponse.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 
 /**
@@ -65,4 +67,26 @@ public class AddErasureCodingPolicyResponse {
   + "error message is " + getErrorMsg();
 }
   }
+
+  @Override
+  public boolean equals(Object o) {
+if (o instanceof AddErasureCodingPolicyResponse) {
+  AddErasureCodingPolicyResponse other = (AddErasureCodingPolicyResponse) 
o;
+  return new EqualsBuilder()
+  .append(policy, other.policy)
+  .append(succeed, other.succeed)
+  .append(errorMsg, other.errorMsg)
+  .isEquals();
+}
+return false;
+  }
+
+  @Override
+  public int hashCode() {
+return new HashCodeBuilder(303855623, 582626729)
+.append(policy)
+.append(succeed)
+.append(errorMsg)
+.toHashCode();
+  }
 }
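
The equals()/hashCode() pair above follows the commons-lang builder idiom: both methods enumerate the same fields, which keeps the two contracts in sync. A minimal sketch on a hypothetical Pair class (the seed constants are arbitrary odd numbers, as in the patch):

import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;

public class Pair {
  private final String key;
  private final int value;

  public Pair(String key, int value) {
    this.key = key;
    this.value = value;
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof Pair)) {
      return false;
    }
    Pair other = (Pair) o;
    return new EqualsBuilder()
        .append(key, other.key)
        .append(value, other.value)
        .isEquals();
  }

  @Override
  public int hashCode() {
    return new HashCodeBuilder(17, 37)  // arbitrary odd seeds
        .append(key)
        .append(value)
        .toHashCode();
  }
}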

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49e285de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
new file mode 100644
index 000..d2b2d50
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
@@ -0,0 +1,198 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static 
org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer.merge;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
+impo

hadoop git commit: HDFS-13004. TestLeaseRecoveryStriped.testLeaseRecovery is failing when safeLength is 0MB or larger than the test file. (Zsolt Venczel via lei)

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 49e285ded -> ce394be80


HDFS-13004. TestLeaseRecoveryStriped.testLeaseRecovery is failing when 
safeLength is 0MB or larger than the test file. (Zsolt Venczel via lei)

(cherry picked from commit 3bd9ea63df769345a9d02a404cfb61323a4cd7e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce394be8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce394be8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce394be8

Branch: refs/heads/branch-3.0
Commit: ce394be8090b604c119c0d47c12bd601bf9bd387
Parents: 49e285d
Author: Lei Xu 
Authored: Tue Jan 16 15:15:11 2018 -0800
Committer: Jason Lowe 
Committed: Wed Jan 17 12:59:53 2018 -0600

--
 .../java/org/apache/hadoop/hdfs/StripedFileTestUtil.java |  7 ---
 .../org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java | 11 +++
 2 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce394be8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 08bf20a..13ca390 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -363,11 +363,12 @@ public class StripedFileTestUtil {
 List&lt;List&lt;LocatedBlock&gt;&gt; blockGroupList = new ArrayList&lt;&gt;();
 LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(srcPath.toString(), 
0L,
 Long.MAX_VALUE);
-int expectedNumGroup = 0;
+
 if (length > 0) {
-  expectedNumGroup = (length - 1) / blkGroupSize + 1;
+  int expectedNumGroup = (length - 1) / blkGroupSize + 1;
+
+  assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
 }
-assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
 
 final ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
 final int cellSize = ecPolicy.getCellSize();
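
The guarded assertion uses (length - 1) / blkGroupSize + 1, which is integer ceiling division and is only well-defined for length &gt; 0 — hence the new if. A tiny worked sketch:

public class CeilDivSketch {
  // Mirrors the expectedNumGroup computation above.
  static int expectedNumGroup(int length, int blkGroupSize) {
    return (length - 1) / blkGroupSize + 1;
  }

  public static void main(String[] args) {
    System.out.println(expectedNumGroup(5, 2));  // 3 groups for 5 units
    System.out.println(expectedNumGroup(4, 2));  // 2 groups: exact fit
  }
}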

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce394be8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 36ac8b3..d74f193 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -85,6 +85,7 @@ public class TestLeaseRecoveryStriped {
   private Configuration conf;
   private final Path dir = new Path("/" + this.getClass().getSimpleName());
   final Path p = new Path(dir, "testfile");
+  private final int testFileLength = (stripesPerBlock - 1) * stripeSize;
 
   @Before
   public void setup() throws IOException {
@@ -191,17 +192,20 @@ public class TestLeaseRecoveryStriped {
 
   private void runTest(int[] blockLengths, long safeLength) throws Exception {
 writePartialBlocks(blockLengths);
+
+int checkDataLength = Math.min(testFileLength, (int)safeLength);
+
 recoverLease();
 
 List&lt;Long&gt; oldGS = new ArrayList&lt;&gt;();
 oldGS.add(1001L);
-StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
+StripedFileTestUtil.checkData(dfs, p, checkDataLength,
 new ArrayList&lt;DatanodeInfo&gt;(), oldGS, blockGroupSize);
 // After recovery, storages are reported by primary DN. we should verify
 // storages reported by blockReport.
 cluster.restartNameNode(true);
 cluster.waitFirstBRCompleted(0, 1);
-StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
+StripedFileTestUtil.checkData(dfs, p, checkDataLength,
 new ArrayList&lt;DatanodeInfo&gt;(), oldGS, blockGroupSize);
   }
 
@@ -219,12 +223,11 @@ public class TestLeaseRecoveryStriped {
 final FSDataOutputStream out = dfs.create(p);
 final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out
 .getWrappedStream();
-int length = (stripesPerBlock - 1) * stripeSize;
 int[] posToKill = getPosToKill(blockLengths);
 int checkingPos = nextCheckingPos(posToKill, 0);
 Set&lt;Integer&gt; stoppedStreamerIndexes = new HashSet&lt;&gt;();
 try {
-  for (int pos = 0; pos < length; pos++) {
+  for (int pos = 0; pos < testFileLength; pos++) {
 out.write(StripedFileTestUtil.getByte(pos));
 if (pos == check

hadoop git commit: HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. Contributed by Vinayakumar B.

2018-01-17 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 ce394be80 -> 244369243


HDFS-9049. Make Datanode Netty reverse proxy port to be configurable. 
Contributed by Vinayakumar B.

(cherry picked from commit 09efdfe9e13c9695867ce4034aa6ec970c2032f1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24436924
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24436924
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24436924

Branch: refs/heads/branch-3.0
Commit: 24436924340428f91b28906457e2a14be1f3722c
Parents: ce394be
Author: Vinayakumar B 
Authored: Wed Jan 17 14:16:48 2018 +0530
Committer: Jason Lowe 
Committed: Wed Jan 17 13:01:52 2018 -0600

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 ++
 .../hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java | 5 -
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 9 +
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24436924/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 1058b4a..9c51f25 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -617,6 +617,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = 
"dfs.datanode.http.address";
   public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 9864;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + 
DFS_DATANODE_HTTP_DEFAULT_PORT;
+  public static final String  DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT =
+  "dfs.datanode.http.internal-proxy.port";
   public static final String  DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
   
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24436924/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index b51b1fc..2e46b28 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -72,6 +72,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT;
 
 public class DatanodeHttpServer implements Closeable {
   private final HttpServer2 infoServer;
@@ -97,12 +98,14 @@ public class DatanodeHttpServer implements Closeable {
 
 Configuration confForInfoServer = new Configuration(conf);
 confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+int proxyPort =
+confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0);
 HttpServer2.Builder builder = new HttpServer2.Builder()
 .setName("datanode")
 .setConf(confForInfoServer)
 .setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
 .hostName(getHostnameForSpnegoPrincipal(confForInfoServer))
-.addEndpoint(URI.create("http://localhost:0";))
+.addEndpoint(URI.create("http://localhost:"; + proxyPort))
 .setFindPort(true);
 
 final boolean xFrameEnabled = conf.getBoolean(
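
A hedged sketch of pinning the new port from client code; the port value is an arbitrary example, and since the builder keeps setFindPort(true), the server may still probe for a free port if the requested one is taken.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class PinInternalProxyPort {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // 0 (the default) keeps the old ephemeral-port behavior.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 9865);
  }
}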

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24436924/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 190a06d..6be11b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/re

hadoop git commit: HDFS-12843. Ozone: Client: TestOzoneRpcClient#testPutKeyRatisThreeNodes is failing. Contributed by Mukul Kumar Singh.

2018-01-17 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 18f9fea7c -> 94fd8201f


HDFS-12843. Ozone: Client: TestOzoneRpcClient#testPutKeyRatisThreeNodes is 
failing. Contributed by  Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94fd8201
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94fd8201
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94fd8201

Branch: refs/heads/HDFS-7240
Commit: 94fd8201f17e41aedbdc76fbc4a1d5704a4a4729
Parents: 18f9fea
Author: Anu Engineer 
Authored: Wed Jan 17 10:42:38 2018 -0800
Committer: Anu Engineer 
Committed: Wed Jan 17 10:42:38 2018 -0800

--
 .../org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94fd8201/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index f7cb7cf..3e461cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneProtos;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.protocolPB.
 StorageContainerLocationProtocolClientSideTranslatorPB;
@@ -89,7 +90,8 @@ public class TestOzoneRpcClient {
 OzoneConfiguration conf = new OzoneConfiguration();
 conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
 OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(5)
+conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 1);
+cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(10)
 .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
 conf.set("ozone.client.protocol",
 "org.apache.hadoop.ozone.client.rpc.RpcClient");





hadoop git commit: YARN-6619. AMRMClient Changes to use the PlacementConstraint and SchcedulingRequest objects. (Arun Suresh via wangda)

2018-01-17 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/YARN-6592 f476b832b -> 97141f1e0


YARN-6619. AMRMClient Changes to use the PlacementConstraint and 
SchcedulingRequest objects. (Arun Suresh via wangda)

Change-Id: I2b93dd173012d54f2c7c07a9e6a0507e4ba8ac02


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97141f1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97141f1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97141f1e

Branch: refs/heads/YARN-6592
Commit: 97141f1e05ae65fd3c6afec87e5f1ac0464f29ed
Parents: f476b83
Author: Wangda Tan 
Authored: Wed Jan 17 11:36:26 2018 -0800
Committer: Wangda Tan 
Committed: Wed Jan 17 11:36:26 2018 -0800

--
 .../hadoop/yarn/client/api/AMRMClient.java  |  38 +++-
 .../yarn/client/api/async/AMRMClientAsync.java  |  48 +
 .../api/async/impl/AMRMClientAsyncImpl.java |  49 -
 .../yarn/client/api/impl/AMRMClientImpl.java| 142 -
 .../client/api/impl/BaseAMRMClientTest.java | 212 +++
 .../yarn/client/api/impl/TestAMRMClient.java| 156 +-
 .../TestAMRMClientPlacementConstraints.java | 204 ++
 .../rmcontainer/RMContainerImpl.java|   3 +
 .../scheduler/AbstractYarnScheduler.java|   1 +
 .../scheduler/SchedulerApplicationAttempt.java  |   1 +
 .../constraint/PlacementConstraintsUtil.java|   4 +-
 11 files changed, 700 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97141f1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index d3d1974..914a146 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.client.api;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
 import java.util.function.Supplier;
 import java.util.List;
 
@@ -39,7 +41,9 @@ import 
org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ProfileCapability;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -554,6 +558,18 @@ public abstract class AMRMClient extends
   }
 
   /**
+   * Add a Collection of SchedulingRequests. The AMRMClient will ensure that
+   * all requests in the same batch are sent in the same allocate call.
+   * @param schedulingRequests Collection of Scheduling Requests.
+   */
+  @Public
+  @InterfaceStability.Unstable
+  public void addSchedulingRequests(
+  Collection&lt;SchedulingRequest&gt; schedulingRequests) {
+
+  }
+
+  /**
* Register the application master. This must be called before any 
* other interaction
* @param appHostName Name of the host on which master is running
@@ -568,7 +584,27 @@ public abstract class AMRMClient extends
  int appHostPort,
  String appTrackingUrl) 
throws YarnException, IOException;
-  
+
+  /**
+   * Register the application master. This must be called before any
+   * other interaction
+   * @param appHostName Name of the host on which master is running
+   * @param appHostPort Port master is listening on
+   * @param appTrackingUrl URL at which the master info can be seen
+   * @param placementConstraints Placement Constraints mappings.
+   * @return RegisterApplicationMasterResponse
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @InterfaceStability.Unstable
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+  String appHostName, int appHostPort, String appTrackingUrl,
+  Map&lt;Set&lt;String&gt;&gt;, PlacementConstraint&gt; placementConstraints)
+  throws YarnException, IOException {
+throw new YarnException("Not supported");
+  }
+
   /**
* Request additional containers and receive new container allocations.
* 
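
A hedged sketch of the new registration overload: one set of allocation tags mapped to a constraint (here, node-scope anti-affinity for "hbase" containers). Host, port, and tracking URL are placeholders.

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
import org.apache.hadoop.yarn.client.api.AMRMClient;

public class RegisterWithConstraints {
  static void register(AMRMClient&lt;AMRMClient.ContainerRequest&gt; client)
      throws Exception {
    Map&lt;Set&lt;String&gt;, PlacementConstraint&gt; constraints =
        Collections.singletonMap(
            Collections.singleton("hbase"),
            PlacementConstraints.build(
                PlacementConstraints.targetNotIn(PlacementConstraints.NODE,
                    PlacementConstraints.PlacementTargets
                        .allocationTag("hbase"))));
    client.registerApplicationMaster(
        "am-host", 0, "http://am-host/track", constraints);
  }
}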

hadoop git commit: YARN-7758. Add an additional check to the validity of container and application ids passed to container-executor. Contributed by Yufei Gu.

2018-01-17 Thread szegedim
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c228a7c70 -> 86a2ac94b


YARN-7758. Add an additional check to the validity of container and application 
ids passed to container-executor. Contributed by Yufei Gu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86a2ac94
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86a2ac94
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86a2ac94

Branch: refs/heads/branch-2
Commit: 86a2ac94b1c46891bfcfa4d9dffcaa636596c271
Parents: c228a7c
Author: Miklos Szegedi 
Authored: Wed Jan 17 12:56:10 2018 -0800
Committer: Miklos Szegedi 
Committed: Wed Jan 17 12:56:10 2018 -0800

--
 .../main/native/container-executor/impl/container-executor.c| 3 ++-
 .../src/main/native/container-executor/impl/main.c  | 5 +
 .../src/main/native/container-executor/impl/util.h  | 3 ++-
 3 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a2ac94/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 1dd9a1a..c1a42ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1068,7 +1068,8 @@ int create_log_dirs(const char *app_id, char * const * 
log_dirs) {
   for(log_root=log_dirs; *log_root != NULL; ++log_root) {
 char *app_log_dir = get_app_log_directory(*log_root, app_id);
 int result = check_nm_local_dir(nm_uid, *log_root);
-if (result != 0) {
+if (result != 0 && app_log_dir != NULL) {
+  free(app_log_dir);
   app_log_dir = NULL;
 }
 if (app_log_dir == NULL) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a2ac94/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index 7a4a9d9..a1b5ebc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -21,6 +21,7 @@
 #include "container-executor.h"
 #include "util.h"
 #include "get_executable.h"
+#include "utils/string-utils.h"
 
 #include 
 #include 
@@ -351,6 +352,10 @@ static int validate_run_as_user_commands(int argc, char 
**argv, int *operation)
 }
 cmd_input.app_id = argv[optind++];
 cmd_input.container_id = argv[optind++];
+if (!validate_container_id(cmd_input.container_id)) {
+  fprintf(ERRORFILE, "Invalid container id %s\n", cmd_input.container_id);
+  return INVALID_CONTAINER_ID;
+}
 cmd_input.cred_file = argv[optind++];
 cmd_input.local_dirs = argv[optind++];// good local dirs as a comma 
separated list
 cmd_input.log_dirs = argv[optind++];// good log dirs as a comma separated 
list

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a2ac94/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
index c4979f6..c4518d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
@@ -67,7 +67,8 @@ enum errorcodes {
   ERROR_SANITIZING_DOCKER_COMMAND = 39,
   DOCKER_IMAGE_I
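
For context, the ids being validated look like container_e17_1410901177871_0001_01_000005 (the e<epoch> part is optional) and application_1410901177871_0001. A rough Java sketch of the same whitelist-style check; the authoritative implementation is the C validate_container_id call shown above, and the regex here is an approximation:

import java.util.regex.Pattern;

final class ContainerIdCheck {
  // Digits and fixed separators only, so path tricks such as "../.." in an
  // attacker-supplied id are rejected before any directory is created.
  private static final Pattern CONTAINER_ID =
      Pattern.compile("container_(e\\d+_)?\\d+_\\d+_\\d+_\\d+");

  static boolean isValid(String id) {
    return id != null && CONTAINER_ID.matcher(id).matches();
  }

  public static void main(String[] args) {
    System.out.println(isValid("container_e17_1410901177871_0001_01_000005")); // true
    System.out.println(isValid("container_../../../etc"));                     // false
  }
}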

[01/11] hadoop git commit: YARN-7670. Modifications to the ResourceScheduler API to support SchedulingRequests. (asuresh) [Forced Update!]

2018-01-17 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/YARN-6592 97141f1e0 -> 4582e7b38 (forced update)


YARN-7670. Modifications to the ResourceScheduler API to support 
SchedulingRequests. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39676318
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39676318
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39676318

Branch: refs/heads/YARN-6592
Commit: 396763189b7b0f1b9d219e2c12781615a664bda9
Parents: 3f037a1
Author: Arun Suresh 
Authored: Tue Dec 19 08:59:23 2017 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:44:20 2018 -0800

--
 .../scheduler/AbstractYarnScheduler.java| 18 +
 .../scheduler/ResourceScheduler.java| 13 
 .../scheduler/capacity/CapacityScheduler.java   | 78 ++--
 .../common/ResourceAllocationCommitter.java | 12 ++-
 .../scheduler/common/fica/FiCaSchedulerApp.java | 30 +---
 .../TestCapacitySchedulerAsyncScheduling.java   | 10 +--
 6 files changed, 138 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39676318/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index cf5e13b..1589d84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -296,6 +297,10 @@ public abstract class AbstractYarnScheduler
 return nodeTracker.getNodes(nodeFilter);
   }
 
+  public List<N> getNodes(final NodeFilter filter) {
+return nodeTracker.getNodes(filter);
+  }
+
   public boolean shouldContainersBeAutoUpdated() {
 return this.autoUpdateContainers;
   }
@@ -1439,4 +1444,17 @@ public abstract class AbstractYarnScheduler
   throw new IOException(e);
 }
   }
+
+  /**
+   * Default implementation. Always returns false.
+   * @param appAttempt ApplicationAttempt.
+   * @param schedulingRequest SchedulingRequest.
+   * @param schedulerNode SchedulerNode.
+   * @return Success or not.
+   */
+  @Override
+  public boolean attemptAllocationOnNode(SchedulerApplicationAttempt 
appAttempt,
+  SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) {
+return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39676318/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
index d96d625..5a56ac7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
@@ -25,6 +25,7 @@ import 
org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Con

[05/11] hadoop git commit: YARN-6596. Introduce Placement Constraint Manager module. (Konstantinos Karanasos via asuresh)

2018-01-17 Thread asuresh
YARN-6596. Introduce Placement Constraint Manager module. (Konstantinos 
Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f650d39b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f650d39b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f650d39b

Branch: refs/heads/YARN-6592
Commit: f650d39b8a6890b55dd961781cb751e3a7c38c51
Parents: b3dd520
Author: Arun Suresh 
Authored: Fri Dec 22 13:26:30 2017 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:50:38 2018 -0800

--
 .../resourcemanager/RMActiveServiceContext.java |  15 +
 .../yarn/server/resourcemanager/RMContext.java  |   6 +
 .../server/resourcemanager/RMContextImpl.java   |  13 +
 .../server/resourcemanager/ResourceManager.java |  13 +
 .../MemoryPlacementConstraintManager.java   | 282 +++
 .../constraint/PlacementConstraintManager.java  | 151 ++
 .../PlacementConstraintManagerService.java  |  93 ++
 .../scheduler/constraint/package-info.java  |  29 ++
 .../TestPlacementConstraintManagerService.java  | 182 
 9 files changed, 784 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f650d39b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 4d0c230..06a1d00 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -43,6 +43,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
@@ -109,6 +110,7 @@ public class RMActiveServiceContext {
   private RMAppLifetimeMonitor rmAppLifetimeMonitor;
   private QueueLimitCalculator queueLimitCalculator;
   private AllocationTagsManager allocationTagsManager;
+  private PlacementConstraintManager placementConstraintManager;
 
   public RMActiveServiceContext() {
 queuePlacementManager = new PlacementManager();
@@ -413,6 +415,19 @@ public class RMActiveServiceContext {
 
   @Private
   @Unstable
+  public PlacementConstraintManager getPlacementConstraintManager() {
+return placementConstraintManager;
+  }
+
+  @Private
+  @Unstable
+  public void setPlacementConstraintManager(
+  PlacementConstraintManager placementConstraintManager) {
+this.placementConstraintManager = placementConstraintManager;
+  }
+
+  @Private
+  @Unstable
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
 return rmDelegatedNodeLabelsUpdater;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f650d39b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index 00da108..eb91a31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/
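
A minimal sketch of registering constraints with the new manager. The registerApplication method name is assumed from the PlacementConstraintManager interface added here, rmContext and appId are placeholders, and the tag name is made up; the PlacementConstraints DSL calls are from the public YARN API:

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
// Anti-affinity: no two "hbase-rs" containers on the same node.
constraints.put(Collections.singleton("hbase-rs"),
    PlacementConstraints.build(targetNotIn(NODE, allocationTag("hbase-rs"))));
rmContext.getPlacementConstraintManager()
    .registerApplication(appId, constraints);             // method name assumed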

[07/11] hadoop git commit: YARN-7613. Implement Basic algorithm for constraint based placement. (Panagiotis Garefalakis via asuresh)

2018-01-17 Thread asuresh
YARN-7613. Implement Basic algorithm for constraint based placement. 
(Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bcd5cc1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bcd5cc1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bcd5cc1

Branch: refs/heads/YARN-6592
Commit: 2bcd5cc12430fce0adf93828c5f520d56780
Parents: e802a6c
Author: Arun Suresh 
Authored: Wed Dec 27 22:59:22 2017 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:51:47 2018 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../src/main/resources/yarn-default.xml |   8 +-
 .../rmcontainer/RMContainerImpl.java|  10 +-
 .../constraint/AllocationTagsManager.java   | 121 ++---
 .../algorithm/DefaultPlacementAlgorithm.java| 172 +++
 .../iterators/PopularTagsIterator.java  |  71 
 .../algorithm/iterators/SerialIterator.java |  53 ++
 .../algorithm/iterators/package-info.java   |  29 
 .../constraint/algorithm/package-info.java  |  29 
 .../constraint/processor/BatchedRequests.java   |  45 -
 .../processor/PlacementProcessor.java   |  32 ++--
 .../processor/SamplePlacementAlgorithm.java | 144 
 .../constraint/TestAllocationTagsManager.java   | 156 -
 .../TestBatchedRequestsIterators.java   |  82 +
 .../constraint/TestPlacementProcessor.java  |   4 +-
 15 files changed, 721 insertions(+), 239 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bcd5cc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 03c24d4..af83d8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -534,6 +534,10 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
   RM_PREFIX + "placement-constraints.algorithm.class";
 
+  /** Used for BasicPlacementAlgorithm - default SERIAL. **/
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR =
+  RM_PREFIX + "placement-constraints.algorithm.iterator";
+
   public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
   RM_PREFIX + "placement-constraints.enabled";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bcd5cc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0285069..62bbdb1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -145,7 +145,13 @@
   <property>
     <description>Constraint Placement Algorithm to be used.</description>
     <name>yarn.resourcemanager.placement-constraints.algorithm.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.SamplePlacementAlgorithm</value>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm</value>
+  </property>
+
+  <property>
+    <description>Placement Algorithm Requests Iterator to be used.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.iterator</name>
+    <value>SERIAL</value>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bcd5cc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index c873509..2c4ef7b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java
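
Sketch of selecting the iterator. The SERIAL value and the config key are taken from the diff above; POPULAR_TAGS is only inferred from the new PopularTagsIterator class, so treat that value as an assumption:

Configuration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR,
    "SERIAL");   // or "POPULAR_TAGS", inferred from PopularTagsIterator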

[10/11] hadoop git commit: YARN-7696. Add container tags to ContainerTokenIdentifier, api.Container and NMContainerStatus to handle all recovery cases. (asuresh)

2018-01-17 Thread asuresh
YARN-7696. Add container tags to ContainerTokenIdentifier, api.Container and 
NMContainerStatus to handle all recovery cases. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/308fc39e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/308fc39e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/308fc39e

Branch: refs/heads/YARN-6592
Commit: 308fc39e3b9d15595aac7bfdb2d29fa90e823594
Parents: 3f57235
Author: Arun Suresh 
Authored: Fri Jan 12 14:37:06 2018 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:53:24 2018 -0800

--
 .../hadoop/yarn/api/records/Container.java  | 15 +
 .../src/main/proto/yarn_protos.proto|  1 +
 .../api/records/impl/pb/ContainerPBImpl.java| 31 +
 .../yarn/security/ContainerTokenIdentifier.java | 69 +++-
 .../src/main/proto/yarn_security_token.proto|  1 +
 .../api/protocolrecords/NMContainerStatus.java  | 14 
 .../impl/pb/NMContainerStatusPBImpl.java| 33 ++
 .../yarn_server_common_service_protos.proto |  1 +
 .../containermanager/ContainerManagerImpl.java  |  3 +-
 .../container/ContainerImpl.java| 19 +++---
 .../rmcontainer/RMContainerImpl.java| 10 ++-
 .../scheduler/SchedulerApplicationAttempt.java  |  3 +-
 .../security/RMContainerTokenSecretManager.java | 21 ++
 .../capacity/TestContainerAllocation.java   |  5 +-
 14 files changed, 194 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/308fc39e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
index 4fdc803..b9ca3f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
@@ -27,6 +27,9 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.Collections;
+import java.util.Set;
+
 /**
  * {@code Container} represents an allocated resource in the cluster.
  * 
@@ -256,4 +259,16 @@ public abstract class Container implements 
Comparable<Container> {
   public void setVersion(int version) {
 throw new UnsupportedOperationException();
   }
+
+  @Private
+  @Unstable
+  public Set<String> getAllocationTags() {
+return Collections.EMPTY_SET;
+  }
+
+  @Private
+  @Unstable
+  public void setAllocationTags(Set<String> allocationTags) {
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/308fc39e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 5cb1177..25c8569 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -152,6 +152,7 @@ message ContainerProto {
   optional ExecutionTypeProto execution_type = 7 [default = GUARANTEED];
   optional int64 allocation_request_id = 8 [default = -1];
   optional int32 version = 9 [default = 0];
+  repeated string allocation_tags = 10;
 }
 
 message ContainerReportProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/308fc39e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
index be84938..47be2f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java
@@ -36,6 +36,9 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 imp
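
With tags carried on api.Container, an AM can, for example, bucket its allocations by tag after an allocate call or on recovery. A small fragment; only getAllocationTags comes from this diff, the rest is standard API:

Map<Set<String>, List<Container>> containersByTags = new HashMap<>();
for (Container c : allocateResponse.getAllocatedContainers()) {
  containersByTags
      .computeIfAbsent(c.getAllocationTags(), k -> new ArrayList<>())
      .add(c);
}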

[08/11] hadoop git commit: YARN-7682. Expose canSatisfyConstraints utility function to validate a placement against a constraint. (Panagiotis Garefalakis via asuresh)

2018-01-17 Thread asuresh
YARN-7682. Expose canSatisfyConstraints utility function to validate a 
placement against a constraint. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96186f5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96186f5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96186f5c

Branch: refs/heads/YARN-6592
Commit: 96186f5c3e28ebf9a8df9ee3706b2f1620fbef4b
Parents: 2bcd5cc
Author: Arun Suresh 
Authored: Wed Jan 3 08:00:50 2018 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:52:08 2018 -0800

--
 .../constraint/PlacementConstraintsUtil.java| 132 +
 .../algorithm/DefaultPlacementAlgorithm.java|  55 +---
 .../TestPlacementConstraintsUtil.java   | 287 +++
 .../constraint/TestPlacementProcessor.java  | 204 +++--
 4 files changed, 601 insertions(+), 77 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96186f5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
new file mode 100644
index 000..956a3c9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
+
+import java.util.Iterator;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
+import 
org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression;
+import 
org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression.TargetType;
+import 
org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
+import 
org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
+import 
org.apache.hadoop.yarn.api.resource.PlacementConstraintTransformations.SingleConstraintTransformer;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm;
+
+/**
+ * This class contains various static methods used by the Placement Algorithms
+ * to simplify constrained placement.
+ * (see also {@link DefaultPlacementAlgorithm}).
+ */
+@Public
+@Unstable
+public final class PlacementConstraintsUtil {
+
+  // Suppresses default constructor, ensuring non-instantiability.
+  private PlacementConstraintsUtil() {
+  }
+
+  /**
+   * Returns true if **single** application constraint with associated
+   * allocationTags and scope is satisfied by a specific scheduler Node.
+   *
+   * @param appId the application id
+   * @param sc the placement constraint
+   * @param te the target expression
+   * @param node the scheduler node
+   * @param tm the allocation tags store
+   * @return true if single application constraint is satisfied by node
+   * @throws InvalidAllocationTagsQueryException
+   */
+  private static boolean canSatisfySingleConstraintExpression(
+  ApplicationId appId, SingleConstraint
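
The single constraints this utility evaluates can be built with the PlacementConstraints DSL (static imports from PlacementConstraints and its PlacementTargets assumed; tag names are made up):

// No two "zk" containers on the same node (anti-affinity).
PlacementConstraint antiAffinity =
    PlacementConstraints.build(targetNotIn(NODE, allocationTag("zk")));
// Land on a rack that already hosts an "hbase-m" container (affinity).
PlacementConstraint affinity =
    PlacementConstraints.build(targetIn(RACK, allocationTag("hbase-m")));
// At most 3 "spark" containers per node (cardinality).
PlacementConstraint maxPerNode =
    PlacementConstraints.build(cardinality(NODE, 0, 3, "spark"));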

[03/11] hadoop git commit: YARN-7669. API and interface modifications for placement constraint processor. (asuresh)

2018-01-17 Thread asuresh
YARN-7669. API and interface modifications for placement constraint processor. 
(asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d70b0467
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d70b0467
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d70b0467

Branch: refs/heads/YARN-6592
Commit: d70b0467c4e5c5017f81064f71efbb7e7be4f774
Parents: 3967631
Author: Arun Suresh 
Authored: Tue Dec 19 22:47:46 2017 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:49:57 2018 -0800

--
 .../yarn/ams/ApplicationMasterServiceUtils.java |  16 +
 .../api/protocolrecords/AllocateResponse.java   |  23 +
 .../api/records/RejectedSchedulingRequest.java  |  70 +++
 .../yarn/api/records/RejectionReason.java   |  44 ++
 .../src/main/proto/yarn_protos.proto|  10 +
 .../src/main/proto/yarn_service_protos.proto|   1 +
 .../impl/pb/AllocateResponsePBImpl.java |  85 
 .../yarn/api/records/impl/pb/ProtoUtils.java|  16 +
 .../pb/RejectedSchedulingRequestPBImpl.java | 148 +++
 .../records/impl/pb/ResourceSizingPBImpl.java   |   8 +
 .../impl/pb/SchedulingRequestPBImpl.java|  11 +
 .../hadoop/yarn/api/TestPBImplRecords.java  |   2 +
 .../resourcemanager/RMActiveServiceContext.java |   2 +-
 .../yarn/server/resourcemanager/RMContext.java  |   2 +-
 .../server/resourcemanager/RMContextImpl.java   |   2 +-
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../constraint/AllocationTagsManager.java   | 431 ---
 .../constraint/AllocationTagsNamespaces.java|  31 --
 .../InvalidAllocationTagsQueryException.java|  35 --
 .../constraint/AllocationTagsManager.java   | 431 +++
 .../constraint/AllocationTagsNamespaces.java|  31 ++
 .../InvalidAllocationTagsQueryException.java|  35 ++
 .../api/ConstraintPlacementAlgorithm.java   |  43 ++
 .../api/ConstraintPlacementAlgorithmInput.java  |  32 ++
 .../api/ConstraintPlacementAlgorithmOutput.java |  58 +++
 ...traintPlacementAlgorithmOutputCollector.java |  32 ++
 .../constraint/api/PlacedSchedulingRequest.java |  79 
 .../constraint/api/SchedulingResponse.java  |  70 +++
 .../scheduler/constraint/api/package-info.java  |  28 ++
 .../constraint/TestAllocationTagsManager.java   | 328 --
 .../rmcontainer/TestRMContainerImpl.java|   2 +-
 .../scheduler/capacity/TestUtils.java   |   2 +-
 .../constraint/TestAllocationTagsManager.java   | 328 ++
 .../scheduler/fifo/TestFifoScheduler.java   |   2 +-
 34 files changed, 1608 insertions(+), 832 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d70b0467/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
index 476da8b..8bdfaf3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.ams;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 
@@ -86,4 +87,19 @@ public final class ApplicationMasterServiceUtils {
 }
 allocateResponse.setAllocatedContainers(allocatedContainers);
   }
+
+  /**
+   * Add rejected Scheduling Requests to {@link AllocateResponse}.
+   * @param allocateResponse Allocate Response.
+   * @param rejectedRequests Rejected SchedulingRequests.
+   */
+  public static void addToRejectedSchedulingRequests(
+  AllocateResponse allocateResponse,
+  List<RejectedSchedulingRequest> rejectedRequests) {
+if (allocateResponse.getRejectedSchedulingRequests() != null
+&& !allocateResponse.getRejectedSchedulingRequests().isEmpty()) {
+  
rejectedRequests.addAll(allocateResponse.getRejectedSchedulingRequests());
+}
+allocateResponse.setRejectedSchedulingRequests(rejectedRequests);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d70b0467/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/prot
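
On the AM side, a rough sketch of consuming the new records. getRejectedSchedulingRequests, RejectedSchedulingRequest and RejectionReason are from this change; the getter names and the resubmit helper are assumptions:

for (RejectedSchedulingRequest rejected :
    allocateResponse.getRejectedSchedulingRequests()) {
  SchedulingRequest request = rejected.getRequest();      // getter name assumed
  System.err.println("Request " + request.getAllocationRequestId()
      + " rejected: " + rejected.getReason());            // getter name assumed
  resubmitOrRelax(request);                               // hypothetical helper
}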

[11/11] hadoop git commit: YARN-6619. AMRMClient Changes to use the PlacementConstraint and SchedulingRequest objects. (Arun Suresh via wangda)

2018-01-17 Thread asuresh
YARN-6619. AMRMClient Changes to use the PlacementConstraint and 
SchedulingRequest objects. (Arun Suresh via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4582e7b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4582e7b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4582e7b3

Branch: refs/heads/YARN-6592
Commit: 4582e7b389a811dcad399978e4e69c42a74e696b
Parents: 308fc39
Author: Wangda Tan 
Authored: Wed Jan 17 11:36:26 2018 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:53:52 2018 -0800

--
 .../hadoop/yarn/client/api/AMRMClient.java  |  38 +++-
 .../yarn/client/api/async/AMRMClientAsync.java  |  48 +
 .../api/async/impl/AMRMClientAsyncImpl.java |  49 -
 .../yarn/client/api/impl/AMRMClientImpl.java| 142 -
 .../client/api/impl/BaseAMRMClientTest.java | 212 +++
 .../yarn/client/api/impl/TestAMRMClient.java| 156 +-
 .../TestAMRMClientPlacementConstraints.java | 204 ++
 .../rmcontainer/RMContainerImpl.java|   3 +
 .../scheduler/AbstractYarnScheduler.java|   1 +
 .../scheduler/SchedulerApplicationAttempt.java  |   1 +
 .../constraint/PlacementConstraintsUtil.java|   4 +-
 11 files changed, 700 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4582e7b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index d3d1974..914a146 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.client.api;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
 import java.util.function.Supplier;
 import java.util.List;
 
@@ -39,7 +41,9 @@ import 
org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ProfileCapability;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -554,6 +558,18 @@ public abstract class AMRMClient extends
   }
 
   /**
+   * Add a Collection of SchedulingRequests. The AMRMClient will ensure that
+   * all requests in the same batch are sent in the same allocate call.
+   * @param schedulingRequests Collection of Scheduling Requests.
+   */
+  @Public
+  @InterfaceStability.Unstable
+  public void addSchedulingRequests(
+  Collection<SchedulingRequest> schedulingRequests) {
+
+  }
+
+  /**
* Register the application master. This must be called before any 
* other interaction
* @param appHostName Name of the host on which master is running
@@ -568,7 +584,27 @@ public abstract class AMRMClient extends
  int appHostPort,
  String appTrackingUrl) 
throws YarnException, IOException;
-  
+
+  /**
+   * Register the application master. This must be called before any
+   * other interaction
+   * @param appHostName Name of the host on which master is running
+   * @param appHostPort Port master is listening on
+   * @param appTrackingUrl URL at which the master info can be seen
+   * @param placementConstraints Placement Constraints mappings.
+   * @return RegisterApplicationMasterResponse
+   * @throws YarnException
+   * @throws IOException
+   */
+  @Public
+  @InterfaceStability.Unstable
+  public RegisterApplicationMasterResponse registerApplicationMaster(
+  String appHostName, int appHostPort, String appTrackingUrl,
+  Map<Set<String>, PlacementConstraint> placementConstraints)
+  throws YarnException, IOException {
+throw new YarnException("Not supported");
+  }
+
   /**
* Request additional containers and receive new container allocations.
* Requests made via addContainerRequest are sent to the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4582e7b3/hadoop-yarn-project/had
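
Putting the two new AMRMClient entry points together, a minimal AM-side sketch: register with a constraint map, then ship a batch of SchedulingRequests. The host/port/URL values and tag names are placeholders, and the SchedulingRequest builder is assumed from the records API:

AMRMClient<AMRMClient.ContainerRequest> amClient = AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();

// Anti-affinity among containers tagged "hbase-rs".
Map<Set<String>, PlacementConstraint> constraints = new HashMap<>();
constraints.put(Collections.singleton("hbase-rs"),
    PlacementConstraints.build(targetNotIn(NODE, allocationTag("hbase-rs"))));
amClient.registerApplicationMaster("am-host", 0, "", constraints);

amClient.addSchedulingRequests(Collections.singletonList(
    SchedulingRequest.newBuilder()
        .allocationRequestId(1L)
        .priority(Priority.newInstance(0))
        .allocationTags(Collections.singleton("hbase-rs"))
        .resourceSizing(ResourceSizing.newInstance(
            4, Resource.newInstance(1024, 1)))
        .build()));
AllocateResponse response = amClient.allocate(0.1f);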

[04/11] hadoop git commit: YARN-7653. Node group support for AllocationTagsManager. (Panagiotis Garefalakis via asuresh)

2018-01-17 Thread asuresh
YARN-7653. Node group support for AllocationTagsManager. (Panagiotis 
Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3dd5204
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3dd5204
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3dd5204

Branch: refs/heads/YARN-6592
Commit: b3dd5204baedd75b831def755e9b0cd01a3a3ac4
Parents: d70b046
Author: Arun Suresh 
Authored: Fri Dec 22 07:24:37 2017 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:50:16 2018 -0800

--
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../constraint/AllocationTagsManager.java   | 282 ++-
 .../rmcontainer/TestRMContainerImpl.java|   2 +-
 .../constraint/TestAllocationTagsManager.java   | 269 --
 4 files changed, 392 insertions(+), 163 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3dd5204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index adda465..d71f224 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -493,7 +493,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   }
 
   protected AllocationTagsManager createAllocationTagsManager() {
-return new AllocationTagsManager();
+return new AllocationTagsManager(this.rmContext);
   }
   
   protected DelegationTokenRenewer createDelegationTokenRenewer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3dd5204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index c278606..7b0b959 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.log4j.Logger;
 
 import java.util.HashMap;
@@ -38,9 +39,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.LongBinaryOperator;
 
 /**
- * Support storing maps between container-tags/applications and
- * nodes. This will be required by affinity/anti-affinity implementation and
- * cardinality.
+ * In-memory mapping between applications/container-tags and nodes/racks.
+ * Required by constrained affinity/anti-affinity and cardinality placement.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -51,48 +51,54 @@ public class AllocationTagsManager {
 
   private ReentrantReadWriteLock.ReadLock readLock;
   private ReentrantReadWriteLock.WriteLock writeLock;
+  private final RMContext rmContext;
 
-  // Application's tags to node
-  private Map perAppMappings =
+  // Application's tags to Node
+  private Map perAppNodeMappings =
+  new HashMap<>();
+  // Application's tags to Rack
+  private Map perAppRackMappings =
   new HashMap<>();
 
   // Global tags to node mapping (used to fast return aggregated tags
   /
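
A simplified model of the bookkeeping described above; the real class uses typed counted-tag structures behind read/write locks, so the bare two-level maps and the method below are illustrative only:

// Count of each tag per node and, new in this change, per rack.
Map<NodeId, Map<String, Long>> tagsPerNode = new HashMap<>();
Map<String, Map<String, Long>> tagsPerRack = new HashMap<>();

void addTags(NodeId node, String rack, Set<String> tags) {
  for (String tag : tags) {
    tagsPerNode.computeIfAbsent(node, n -> new HashMap<>())
        .merge(tag, 1L, Long::sum);
    tagsPerRack.computeIfAbsent(rack, r -> new HashMap<>())
        .merge(tag, 1L, Long::sum);
  }
}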

[06/11] hadoop git commit: YARN-7612. Add Processor Framework for Rich Placement Constraints. (asuresh)

2018-01-17 Thread asuresh
YARN-7612. Add Processor Framework for Rich Placement Constraints. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e802a6c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e802a6c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e802a6c0

Branch: refs/heads/YARN-6592
Commit: e802a6c06cb46462ad84b77e4df663d075c02316
Parents: f650d39
Author: Arun Suresh 
Authored: Fri Dec 22 15:51:20 2017 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:51:28 2018 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  26 ++
 .../src/main/resources/yarn-default.xml |  30 ++
 .../ApplicationMasterService.java   |  15 +
 .../rmcontainer/RMContainerImpl.java|   7 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../constraint/processor/BatchedRequests.java   | 105 +
 .../processor/NodeCandidateSelector.java|  38 ++
 .../processor/PlacementDispatcher.java  | 145 +++
 .../processor/PlacementProcessor.java   | 343 
 .../processor/SamplePlacementAlgorithm.java | 144 +++
 .../constraint/processor/package-info.java  |  29 ++
 .../yarn/server/resourcemanager/MockAM.java |  26 ++
 .../yarn/server/resourcemanager/MockRM.java |  14 +
 .../constraint/TestPlacementProcessor.java  | 394 +++
 14 files changed, 1316 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e802a6c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 1b6bd0e..03c24d4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -529,6 +529,32 @@ public class YarnConfiguration extends Configuration {
   /** The class to use as the resource scheduler.*/
   public static final String RM_SCHEDULER = 
 RM_PREFIX + "scheduler.class";
+
+  /** Placement Algorithm. */
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
+  RM_PREFIX + "placement-constraints.algorithm.class";
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
+  RM_PREFIX + "placement-constraints.enabled";
+
+  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = true;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS =
+  RM_PREFIX + "placement-constraints.retry-attempts";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS = 3;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE =
+  RM_PREFIX + "placement-constraints.algorithm.pool-size";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE 
=
+  1;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE =
+  RM_PREFIX + "placement-constraints.scheduler.pool-size";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE 
=
+  1;
  
   public static final String DEFAULT_RM_SCHEDULER = 
   
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e802a6c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d450eca..0285069 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -131,6 +131,36 @@
   </property>
 
   <property>
+    <description>Enable Constraint Placement.</description>
+    <name>yarn.resourcemanager.placement-constraints.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>Number of times to retry placing of rejected SchedulingRequests</description>
+    <name>yarn.resourcemanager.placement-constraints.retry-attempts</name>
+    <value>3</value>
+  </property>
+
+  <property>
+    <description>Constraint Placement Algorithm to be used.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.SamplePlacementAlgorithm</value>
+  </property>
+
+  <property>
+    <description>Threadpool size for the Algorithm used for place
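
Sketch of turning the processor on, using the YarnConfiguration constants from the diff above:

Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ENABLED, true);
conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS, 3);
conf.setInt(YarnConfiguration.RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE, 1);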

[02/11] hadoop git commit: YARN-7669. API and interface modifications for placement constraint processor. (asuresh)

2018-01-17 Thread asuresh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d70b0467/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
new file mode 100644
index 000..9571f0e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Encapsulates the output of the ConstraintPlacementAlgorithm. The Algorithm
+ * is free to produce multiple output objects at the end of each run and it
+ * must use the provided ConstraintPlacementAlgorithmOutputCollector to
+ * aggregate/collect this output. Similar to the MapReduce Mapper/Reducer
+ * which is provided a collector to collect output.
+ */
+public class ConstraintPlacementAlgorithmOutput {
+
+  private final ApplicationId applicationId;
+
+  public ConstraintPlacementAlgorithmOutput(ApplicationId applicationId) {
+this.applicationId = applicationId;
+  }
+
+  private final List<PlacedSchedulingRequest> placedRequests =
+  new ArrayList<>();
+
+  private final List<SchedulingRequest> rejectedRequests =
+  new ArrayList<>();
+
+  public List<PlacedSchedulingRequest> getPlacedRequests() {
+return placedRequests;
+  }
+
+  public List<SchedulingRequest> getRejectedRequests() {
+return rejectedRequests;
+  }
+
+  public ApplicationId getApplicationId() {
+return applicationId;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d70b0467/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
new file mode 100644
index 000..131fd42
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permi
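
A hedged sketch of an algorithm driving the collector pattern described in the javadoc above. The ConstraintPlacementAlgorithm and input interfaces are only listed, not shown, in this message, so the place() signature, the input accessors and the PlacedSchedulingRequest constructor are assumptions:

public class SketchPlacementAlgorithm implements ConstraintPlacementAlgorithm {
  @Override
  public void place(ConstraintPlacementAlgorithmInput input,
      ConstraintPlacementAlgorithmOutputCollector collector) {
    ConstraintPlacementAlgorithmOutput out =
        new ConstraintPlacementAlgorithmOutput(getAppId(input)); // accessor assumed
    for (SchedulingRequest request : getRequests(input)) {       // accessor assumed
      SchedulerNode node = chooseNode(request);                  // placement logic elided
      if (node != null) {
        out.getPlacedRequests().add(
            new PlacedSchedulingRequest(request));               // constructor assumed
      } else {
        out.getRejectedRequests().add(request);
      }
    }
    collector.collect(out);                                      // method name assumed
  }
}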

[09/11] hadoop git commit: YARN-7681. Double-check placement constraints in scheduling phase before actual allocation is made. (Weiwei Yang via asuresh)

2018-01-17 Thread asuresh
YARN-7681. Double-check placement constraints in scheduling phase before actual 
allocation is made. (Weiwei Yang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f572351
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f572351
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f572351

Branch: refs/heads/YARN-6592
Commit: 3f572351daa5256789c1542691c09e0082906fa6
Parents: 96186f5
Author: Arun Suresh 
Authored: Wed Jan 10 09:04:30 2018 -0800
Committer: Arun Suresh 
Committed: Wed Jan 17 13:52:24 2018 -0800

--
 .../scheduler/capacity/CapacityScheduler.java   | 23 
 1 file changed, 23 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f572351/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index f03d7d1..956d840 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -123,6 +123,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCo
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintsUtil;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
@@ -2512,6 +2514,27 @@ public class CapacityScheduler extends
 ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
 resourceCommitRequest = createResourceCommitRequest(
 appAttempt, schedulingRequest, schedulerNode);
+
+// Validate placement constraint is satisfied before
+// committing the request.
+try {
+  if (!PlacementConstraintsUtil.canSatisfyConstraints(
+  appAttempt.getApplicationId(),
+  schedulingRequest.getAllocationTags(),
+  schedulerNode,
+  rmContext.getPlacementConstraintManager(),
+  rmContext.getAllocationTagsManager())) {
+LOG.debug("Failed to allocate container for application "
++ appAttempt.getApplicationId() + " on node "
++ schedulerNode.getNodeName()
++ " because this allocation violates the"
++ " placement constraint.");
+return false;
+  }
+} catch (InvalidAllocationTagsQueryException e) {
+  LOG.warn("Unable to allocate container", e);
+  return false;
+}
 return tryCommit(getClusterResource(), resourceCommitRequest, false);
   }
 }





hadoop git commit: YARN-7758. Add an additional check to the validity of container and application ids passed to container-executor. Contributed by Yufei Gu.

2018-01-17 Thread szegedim
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 244369243 -> 762935315


YARN-7758. Add an additional check to the validity of container and application 
ids passed to container-executor. Contributed by Yufei Gu.

(cherry picked from commit 41049ba5d129f0fd0953ed8fdeb12635f7546bb2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76293531
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76293531
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76293531

Branch: refs/heads/branch-3.0
Commit: 7629353153476a0abf6a347807a1e1ea5be33611
Parents: 2443692
Author: Miklos Szegedi 
Authored: Tue Jan 16 15:40:43 2018 -0800
Committer: Miklos Szegedi 
Committed: Wed Jan 17 13:47:44 2018 -0800

--
 .../main/native/container-executor/impl/container-executor.c| 3 ++-
 .../src/main/native/container-executor/impl/main.c  | 5 +
 .../src/main/native/container-executor/impl/util.h  | 3 ++-
 3 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76293531/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 366bbb5..8419313 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1068,7 +1068,8 @@ int create_log_dirs(const char *app_id, char * const * 
log_dirs) {
   for(log_root=log_dirs; *log_root != NULL; ++log_root) {
 char *app_log_dir = get_app_log_directory(*log_root, app_id);
 int result = check_nm_local_dir(nm_uid, *log_root);
-if (result != 0) {
+if (result != 0 && app_log_dir != NULL) {
+  free(app_log_dir);
   app_log_dir = NULL;
 }
 if (app_log_dir == NULL) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76293531/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index af401b8..5c327cf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -23,6 +23,7 @@
 #include "get_executable.h"
 #include "modules/gpu/gpu-module.h"
 #include "modules/cgroups/cgroups-operations.h"
+#include "utils/string-utils.h"
 
 #include 
 #include 
@@ -362,6 +363,10 @@ static int validate_run_as_user_commands(int argc, char 
**argv, int *operation)
 }
 cmd_input.app_id = argv[optind++];
 cmd_input.container_id = argv[optind++];
+if (!validate_container_id(cmd_input.container_id)) {
+  fprintf(ERRORFILE, "Invalid container id %s\n", cmd_input.container_id);
+  return INVALID_CONTAINER_ID;
+}
 cmd_input.cred_file = argv[optind++];
 cmd_input.local_dirs = argv[optind++];// good local dirs as a comma 
separated list
 cmd_input.log_dirs = argv[optind++];// good log dirs as a comma separated 
list

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76293531/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
index ed9fba8..6aac1fe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/util.h
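
As a rough illustration of what the new container-id check guards against (a
malformed or hostile id being spliced into filesystem paths by a setuid binary),
here is a hedged Java sketch. The regex and class are assumptions for
illustration, not the container-executor's actual C implementation; YARN
container ids normally look like container_1410901177871_0001_01_000005,
optionally with an e<epoch>_ segment after the prefix.

import java.util.regex.Pattern;

public class ContainerIdCheckSketch {

  // Assumed shape (illustrative): container_[e<epoch>_]<clusterTs>_<appId>_<attempt>_<id>
  private static final Pattern CONTAINER_ID =
      Pattern.compile("container_(e\\d+_)?\\d+_\\d+_\\d+_\\d+");

  // Reject anything that does not look like a well-formed container id before
  // it is used to build paths or commands -- the same motivation as the check
  // added to main.c above.
  static boolean isValidContainerId(String id) {
    return id != null && CONTAINER_ID.matcher(id).matches();
  }

  public static void main(String[] args) {
    System.out.println(isValidContainerId("container_1410901177871_0001_01_000005"));     // true
    System.out.println(isValidContainerId("container_e17_1410901177871_0001_01_000005")); // true
    System.out.println(isValidContainerId("../../etc/passwd"));                           // false
  }
}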

hadoop git commit: YARN-7717. Add configuration consistency for module.enabled and docker.privileged-containers.enabled. Contributed by Eric Badger.

2018-01-17 Thread szegedim
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e42d0582 -> a68e445dc


YARN-7717. Add configuration consistency for module.enabled and 
docker.privileged-containers.enabled. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a68e445d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a68e445d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a68e445d

Branch: refs/heads/trunk
Commit: a68e445dc682f4a123cdf016ce1aa46e550c7fdf
Parents: 6e42d05
Author: Miklos Szegedi 
Authored: Wed Jan 17 14:11:14 2018 -0800
Committer: Miklos Szegedi 
Committed: Wed Jan 17 14:11:14 2018 -0800

--
 .../hadoop-yarn/conf/container-executor.cfg |  4 +--
 .../impl/container-executor.c   | 31 +++---
 .../container-executor/impl/utils/docker-util.c |  6 ++--
 .../test/test-container-executor.c  |  6 
 .../test/utils/test_docker_util.cc  | 34 
 .../src/site/markdown/DockerContainers.md   |  4 +--
 6 files changed, 53 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a68e445d/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
--
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg 
b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
index 4c698b5..36676b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
+++ b/hadoop-yarn-project/hadoop-yarn/conf/container-executor.cfg
@@ -2,7 +2,7 @@ yarn.nodemanager.linux-container-executor.group=#configured 
value of yarn.nodema
 banned.users=#comma separated list of users who can not run applications
 min.user.id=1000#Prevent other super-users
 allowed.system.users=##comma separated list of system users who CAN run 
applications
-feature.tc.enabled=0
+feature.tc.enabled=false
 
 # The configs below deal with settings for Docker
 #[docker]
@@ -13,7 +13,7 @@ feature.tc.enabled=0
 #  docker.allowed.networks=## comma separated networks that can be used. e.g. 
bridge,host,none
 #  docker.allowed.ro-mounts=## comma separated volumes that can be mounted as 
read-only
 #  docker.allowed.rw-mounts=## comma separated volumes that can be mounted as 
read-write, add the yarn local and log dirs to this list to run Hadoop jobs
-#  docker.privileged-containers.enabled=0
+#  docker.privileged-containers.enabled=false
 #  docker.allowed.volume-drivers=## comma separated list of allowed 
volume-drivers
 
 # The configs below deal with settings for FPGA resource

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a68e445d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 98e2d6e..b0b8e76 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -39,6 +39,7 @@
 #include 
 #include 
 #include 
+#include <strings.h>
 #include 
 #include 
 #include 
@@ -441,19 +442,25 @@ int is_feature_enabled(const char* feature_key, int 
default_value,
 int enabled = default_value;
 
 if (enabled_str != NULL) {
-char *end_ptr = NULL;
-enabled = strtol(enabled_str, &end_ptr, 10);
-
-if ((enabled_str == end_ptr || *end_ptr != '\0') ||
-(enabled < 0 || enabled > 1)) {
-  fprintf(LOGFILE, "Illegal value '%s' for '%s' in configuration. "
-  "Using default value: %d.\n", enabled_str, feature_key,
-  default_value);
-  fflush(LOGFILE);
-  free(enabled_str);
-  return default_value;
-}
+if (strcasecmp(enabled_str, "true") == 0) {
+enabled = 1;
+} else if (strcasecmp(enabled_str, "false") == 0) {
+enabled = 0;
+} else {
+char *end_ptr = NULL;
+enabled = strtol(enabled_str, &end_ptr, 10);
+
+if ((enabled_str == end_ptr || *end_ptr != '\0') ||
+(enabled < 0 || enabled > 1)) {
+  fprintf(LOGFILE, "Illegal value '%s' for '%s' in configuration. "
+  "Using default value: %d.\n", enabled_str, feature_key,
+  default_value);
+  fflush(LOGFILE);
+  free(enabled_str);
+  return default_value;
+}
+}

hadoop git commit: HADOOP-15177. Update the release year to 2018. Contributed by Bharat Viswanadham.

2018-01-17 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk a68e445dc -> cdaf92c9f


HADOOP-15177. Update the release year to 2018. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdaf92c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdaf92c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdaf92c9

Branch: refs/heads/trunk
Commit: cdaf92c9f57560219b8f915a19ad8603ddf2a505
Parents: a68e445
Author: Akira Ajisaka 
Authored: Thu Jan 18 13:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 18 13:15:57 2018 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdaf92c9/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a73b6af..66958a2 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -31,7 +31,7 @@
 
   
 
-2017
+2018
 
 false
 
true





hadoop git commit: HADOOP-15177. Update the release year to 2018. Contributed by Bharat Viswanadham.

2018-01-17 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 762935315 -> 88e3ad31b


HADOOP-15177. Update the release year to 2018. Contributed by Bharat 
Viswanadham.

(cherry picked from commit cdaf92c9f57560219b8f915a19ad8603ddf2a505)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88e3ad31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88e3ad31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88e3ad31

Branch: refs/heads/branch-3.0
Commit: 88e3ad31b5cfd1112c9b4ca99feb31d08b1c3692
Parents: 7629353
Author: Akira Ajisaka 
Authored: Thu Jan 18 13:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 18 13:16:46 2018 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88e3ad31/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 92ed9be..92bd595 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -31,7 +31,7 @@
 
   
 
-2017
+2018
 
 false
 
true





hadoop git commit: HADOOP-15177. Update the release year to 2018. Contributed by Bharat Viswanadham.

2018-01-17 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 98835e14a -> c614a39e9


HADOOP-15177. Update the release year to 2018. Contributed by Bharat 
Viswanadham.

(cherry picked from commit cdaf92c9f57560219b8f915a19ad8603ddf2a505)
(cherry picked from commit b0c9b89281c9264c70ae79b1e4830e0fa2411b4f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c614a39e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c614a39e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c614a39e

Branch: refs/heads/branch-2.9
Commit: c614a39e9f40fda6e169b8762a890b42b1b2952d
Parents: 98835e1
Author: Akira Ajisaka 
Authored: Thu Jan 18 13:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 18 13:17:42 2018 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c614a39e/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 58faa10..3b506f9 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -31,7 +31,7 @@
 
   
 
-2017
+2018
 
 false
 
true





hadoop git commit: HADOOP-15177. Update the release year to 2018. Contributed by Bharat Viswanadham.

2018-01-17 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 86a2ac94b -> b0c9b8928


HADOOP-15177. Update the release year to 2018. Contributed by Bharat 
Viswanadham.

(cherry picked from commit cdaf92c9f57560219b8f915a19ad8603ddf2a505)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0c9b892
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0c9b892
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0c9b892

Branch: refs/heads/branch-2
Commit: b0c9b89281c9264c70ae79b1e4830e0fa2411b4f
Parents: 86a2ac9
Author: Akira Ajisaka 
Authored: Thu Jan 18 13:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 18 13:17:07 2018 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0c9b892/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 544535b..7051b57 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -31,7 +31,7 @@
 
   
 
-2017
+2018
 
 false
 
true





hadoop git commit: HADOOP-15177. Update the release year to 2018. Contributed by Bharat Viswanadham.

2018-01-17 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 34f08f740 -> 6e509ac61


HADOOP-15177. Update the release year to 2018. Contributed by Bharat 
Viswanadham.

(cherry picked from commit cdaf92c9f57560219b8f915a19ad8603ddf2a505)
(cherry picked from commit b0c9b89281c9264c70ae79b1e4830e0fa2411b4f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e509ac6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e509ac6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e509ac6

Branch: refs/heads/branch-2.8
Commit: 6e509ac614a3ad03dd50df2ae1295860658b6ae2
Parents: 34f08f7
Author: Akira Ajisaka 
Authored: Thu Jan 18 13:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 18 13:18:10 2018 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e509ac6/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 448e91a..5fa 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -31,7 +31,7 @@
 
   
 
-2017
+2018
 
 false
 
true





hadoop git commit: HADOOP-15177. Update the release year to 2018. Contributed by Bharat Viswanadham.

2018-01-17 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 88d951e30 -> 17a673e74


HADOOP-15177. Update the release year to 2018. Contributed by Bharat 
Viswanadham.

(cherry picked from commit cdaf92c9f57560219b8f915a19ad8603ddf2a505)
(cherry picked from commit b0c9b89281c9264c70ae79b1e4830e0fa2411b4f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17a673e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17a673e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17a673e7

Branch: refs/heads/branch-2.7
Commit: 17a673e740cc626f1537a8434ecce4ff7ffe3b8e
Parents: 88d951e
Author: Akira Ajisaka 
Authored: Thu Jan 18 13:15:57 2018 +0900
Committer: Akira Ajisaka 
Committed: Thu Jan 18 13:20:33 2018 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-project/pom.xml  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a673e7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1e5c164..a401fdc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -12,6 +12,9 @@ Release 2.7.6 - UNRELEASED
Random and 256 bit secrets (Contributed by Robert Kanter via
 Daniel Templeton)
 
+HADOOP-15177. Update the release year to 2018.
+(Bharat Viswanadham via aajisaka)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17a673e7/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 27990b7..cf073fe 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -31,7 +31,7 @@
 
   
 
-2017
+2018
 
 false
 
true





[1/5] hadoop git commit: HADOOP-15150. In FsShell, UGI params should be overridden through env vars (-D arg). Contributed by Brahma Reddy Battula.

2018-01-17 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b0c9b8928 -> 8181367c6
  refs/heads/branch-2.8 6e509ac61 -> 759b1e14f
  refs/heads/branch-2.9 c614a39e9 -> 3cdc39215
  refs/heads/branch-3.0 88e3ad31b -> e57540671
  refs/heads/trunk cdaf92c9f -> 08332e12d


HADOOP-15150. In FsShell, UGI params should be overridden through env vars 
(-D arg). Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08332e12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08332e12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08332e12

Branch: refs/heads/trunk
Commit: 08332e12d055d85472f0c9371fefe9b56bfea1ed
Parents: cdaf92c
Author: Brahma Reddy Battula 
Authored: Thu Jan 18 10:54:32 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Jan 18 10:54:32 2018 +0530

--
 .../src/main/java/org/apache/hadoop/fs/FsShell.java   |  2 ++
 .../java/org/apache/hadoop/fs/TestFsShellList.java| 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08332e12/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 721f4df..94d3389 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -99,6 +100,7 @@ public class FsShell extends Configured implements Tool {
   
   protected void init() throws IOException {
 getConf().setQuietMode(true);
+UserGroupInformation.setConfiguration(getConf());
 if (commandFactory == null) {
   commandFactory = new CommandFactory(getConf());
   commandFactory.addObject(new Help(), "-help");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08332e12/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
index 03720d3..c780f41 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
@@ -75,4 +75,18 @@ public class TestFsShellList {
 lsArgv = new String[]{"-ls", "-q", testRootDir.toString()};
 assertThat(shell.run(lsArgv), is(0));
   }
+
+  /*
+  UGI params should take effect when passed on the command line.
+ */
+  @Test(expected = IllegalArgumentException.class)
+  public void testListWithUGI() throws Exception {
+FsShell fsShell = new FsShell(new Configuration());
+//Passing a dummy value so that it should throw an IAE
+fsShell.getConf()
+.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+"DUMMYAUTH");
+String[] lsArgv = new String[] {"-ls", testRootDir.toString()};
+fsShell.run(lsArgv);
+  }
 }
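
For completeness, a small driver showing why the new UserGroupInformation line
matters: ToolRunner parses generic options such as -D key=value into the
Configuration before FsShell.run() executes, and with this patch FsShell.init()
pushes that Configuration into UGI, so a per-invocation override of
hadoop.security.authentication actually takes effect. FsShell, ToolRunner and
Configuration are the real Hadoop classes; the driver class itself is
illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class FsShellUgiDemo {
  public static void main(String[] args) throws Exception {
    // ToolRunner strips -D options into the Configuration it hands to the
    // Tool, so the auth override below is visible in FsShell.getConf().
    String[] argv = {
        "-D", "hadoop.security.authentication=simple",
        "-ls", "/"
    };
    int exitCode = ToolRunner.run(new Configuration(), new FsShell(), argv);
    System.exit(exitCode);
  }
}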





[2/5] hadoop git commit: HADOOP-15150. In FsShell, UGI params should be overridden through env vars (-D arg). Contributed by Brahma Reddy Battula.

2018-01-17 Thread brahma
HADOOP-15150. In FsShell, UGI params should be overridden through env vars 
(-D arg). Contributed by Brahma Reddy Battula.

(cherry picked from commit 08332e12d055d85472f0c9371fefe9b56bfea1ed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5754067
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5754067
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5754067

Branch: refs/heads/branch-3.0
Commit: e575406715c3b2213f7f3096b8c22a9f991db5fd
Parents: 88e3ad3
Author: Brahma Reddy Battula 
Authored: Thu Jan 18 10:54:32 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Jan 18 10:56:12 2018 +0530

--
 .../src/main/java/org/apache/hadoop/fs/FsShell.java   |  2 ++
 .../java/org/apache/hadoop/fs/TestFsShellList.java| 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5754067/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 721f4df..94d3389 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -99,6 +100,7 @@ public class FsShell extends Configured implements Tool {
   
   protected void init() throws IOException {
 getConf().setQuietMode(true);
+UserGroupInformation.setConfiguration(getConf());
 if (commandFactory == null) {
   commandFactory = new CommandFactory(getConf());
   commandFactory.addObject(new Help(), "-help");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5754067/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
index 03720d3..c780f41 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
@@ -75,4 +75,18 @@ public class TestFsShellList {
 lsArgv = new String[]{"-ls", "-q", testRootDir.toString()};
 assertThat(shell.run(lsArgv), is(0));
   }
+
+  /*
+  UGI params should take effect when passed on the command line.
+ */
+  @Test(expected = IllegalArgumentException.class)
+  public void testListWithUGI() throws Exception {
+FsShell fsShell = new FsShell(new Configuration());
+//Passing a dummy value so that it should throw an IAE
+fsShell.getConf()
+.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+"DUMMYAUTH");
+String[] lsArgv = new String[] {"-ls", testRootDir.toString()};
+fsShell.run(lsArgv);
+  }
 }





[3/5] hadoop git commit: HADOOP-15150. In FsShell, UGI params should be overridden through env vars (-D arg). Contributed by Brahma Reddy Battula.

2018-01-17 Thread brahma
HADOOP-15150. In FsShell, UGI params should be overridden through env vars 
(-D arg). Contributed by Brahma Reddy Battula.

(cherry picked from commit 08332e12d055d85472f0c9371fefe9b56bfea1ed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8181367c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8181367c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8181367c

Branch: refs/heads/branch-2
Commit: 8181367c623f3a5a042cc7fa3ecd3047e202ea1d
Parents: b0c9b89
Author: Brahma Reddy Battula 
Authored: Thu Jan 18 10:54:32 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Jan 18 10:56:58 2018 +0530

--
 .../src/main/java/org/apache/hadoop/fs/FsShell.java   |  2 ++
 .../java/org/apache/hadoop/fs/TestFsShellList.java| 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8181367c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index eccfbfc..0bf72c9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -92,6 +93,7 @@ public class FsShell extends Configured implements Tool {
   
   protected void init() throws IOException {
 getConf().setQuietMode(true);
+UserGroupInformation.setConfiguration(getConf());
 if (commandFactory == null) {
   commandFactory = new CommandFactory(getConf());
   commandFactory.addObject(new Help(), "-help");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8181367c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
index 03720d3..c780f41 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
@@ -75,4 +75,18 @@ public class TestFsShellList {
 lsArgv = new String[]{"-ls", "-q", testRootDir.toString()};
 assertThat(shell.run(lsArgv), is(0));
   }
+
+  /*
+  UGI params should take effect when passed on the command line.
+ */
+  @Test(expected = IllegalArgumentException.class)
+  public void testListWithUGI() throws Exception {
+FsShell fsShell = new FsShell(new Configuration());
+//Passing a dummy value so that it should throw an IAE
+fsShell.getConf()
+.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+"DUMMYAUTH");
+String[] lsArgv = new String[] {"-ls", testRootDir.toString()};
+fsShell.run(lsArgv);
+  }
 }





[4/5] hadoop git commit: HADOOP-15150. In FsShell, UGI params should be overridden through env vars (-D arg). Contributed by Brahma Reddy Battula.

2018-01-17 Thread brahma
HADOOP-15150. In FsShell, UGI params should be overridden through env vars 
(-D arg). Contributed by Brahma Reddy Battula.

(cherry picked from commit 08332e12d055d85472f0c9371fefe9b56bfea1ed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cdc3921
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cdc3921
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cdc3921

Branch: refs/heads/branch-2.9
Commit: 3cdc3921509aed53c5405289ff34e44c508da29a
Parents: c614a39
Author: Brahma Reddy Battula 
Authored: Thu Jan 18 10:54:32 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Jan 18 11:02:38 2018 +0530

--
 .../src/main/java/org/apache/hadoop/fs/FsShell.java   |  2 ++
 .../java/org/apache/hadoop/fs/TestFsShellList.java| 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cdc3921/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index eccfbfc..0bf72c9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -92,6 +93,7 @@ public class FsShell extends Configured implements Tool {
   
   protected void init() throws IOException {
 getConf().setQuietMode(true);
+UserGroupInformation.setConfiguration(getConf());
 if (commandFactory == null) {
   commandFactory = new CommandFactory(getConf());
   commandFactory.addObject(new Help(), "-help");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cdc3921/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
index 03720d3..c780f41 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
@@ -75,4 +75,18 @@ public class TestFsShellList {
 lsArgv = new String[]{"-ls", "-q", testRootDir.toString()};
 assertThat(shell.run(lsArgv), is(0));
   }
+
+  /*
+  UGI params should take effect when passed on the command line.
+ */
+  @Test(expected = IllegalArgumentException.class)
+  public void testListWithUGI() throws Exception {
+FsShell fsShell = new FsShell(new Configuration());
+//Passing a dummy value so that it should throw an IAE
+fsShell.getConf()
+.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+"DUMMYAUTH");
+String[] lsArgv = new String[] {"-ls", testRootDir.toString()};
+fsShell.run(lsArgv);
+  }
 }





[5/5] hadoop git commit: HADOOP-15150. In FsShell, UGI params should be overridden through env vars (-D arg). Contributed by Brahma Reddy Battula.

2018-01-17 Thread brahma
HADOOP-15150. In FsShell, UGI params should be overridden through env vars 
(-D arg). Contributed by Brahma Reddy Battula.

(cherry picked from commit 08332e12d055d85472f0c9371fefe9b56bfea1ed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/759b1e14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/759b1e14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/759b1e14

Branch: refs/heads/branch-2.8
Commit: 759b1e14f92e51972e952d9981c545526631fcb5
Parents: 6e509ac
Author: Brahma Reddy Battula 
Authored: Thu Jan 18 10:54:32 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Jan 18 11:08:25 2018 +0530

--
 .../src/main/java/org/apache/hadoop/fs/FsShell.java   |  2 ++
 .../java/org/apache/hadoop/fs/TestFsShellList.java| 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/759b1e14/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 97b65f2..d183b44 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -92,6 +93,7 @@ public class FsShell extends Configured implements Tool {
   
   protected void init() throws IOException {
 getConf().setQuietMode(true);
+UserGroupInformation.setConfiguration(getConf());
 if (commandFactory == null) {
   commandFactory = new CommandFactory(getConf());
   commandFactory.addObject(new Help(), "-help");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/759b1e14/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
index 03720d3..c780f41 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
@@ -75,4 +75,18 @@ public class TestFsShellList {
 lsArgv = new String[]{"-ls", "-q", testRootDir.toString()};
 assertThat(shell.run(lsArgv), is(0));
   }
+
+  /*
+  UGI params should take effect when passed on the command line.
+ */
+  @Test(expected = IllegalArgumentException.class)
+  public void testListWithUGI() throws Exception {
+FsShell fsShell = new FsShell(new Configuration());
+//Passing a dummy value so that it should throw an IAE
+fsShell.getConf()
+.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+"DUMMYAUTH");
+String[] lsArgv = new String[] {"-ls", testRootDir.toString()};
+fsShell.run(lsArgv);
+  }
 }

