[1/3] hadoop git commit: HDFS-11432. Federation : Support fully qualified path for Quota/Snapshot/cacheadmin/cryptoadmin commands. Contributed by Brahma Reddy Battula.

2017-02-28 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c7ddf95d4 -> a545ee4d7
  refs/heads/branch-2.8 265ddb20c -> 31bec2c16
  refs/heads/trunk 989bd56b9 -> dcd03df9f


HDFS-11432. Federation : Support fully qualified path for 
Quota/Snapshot/cacheadmin/cryptoadmin commands. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcd03df9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcd03df9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcd03df9

Branch: refs/heads/trunk
Commit: dcd03df9f9e0080d7e179060ffc8148336c31b3e
Parents: 989bd56
Author: Brahma Reddy Battula 
Authored: Wed Mar 1 10:45:56 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Mar 1 10:45:56 2017 +0530

--
 .../org/apache/hadoop/fs/shell/Command.java | 14 -
 .../apache/hadoop/hdfs/tools/CacheAdmin.java|  8 +--
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   | 18 +++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 57 +++-
 .../hdfs/tools/snapshot/SnapshotDiff.java   | 19 ++-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 26 +
 .../hadoop/hdfs/TestSnapshotCommands.java   | 34 
 7 files changed, 136 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
index c573aa0..4c5cbad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
@@ -101,7 +101,17 @@ abstract public class Command extends Configured {
* @throws IOException if any error occurs
*/
   abstract protected void run(Path path) throws IOException;
-  
+
+  /**
+   * Execute the command on the input path data. Commands can override to make
+   * use of the resolved filesystem.
+   * @param pathData The input path with resolved filesystem
+   * @throws IOException
+   */
+  protected void run(PathData pathData) throws IOException {
+run(pathData.path);
+  }
+
   /** 
* For each source path, execute the command
* 
@@ -113,7 +123,7 @@ abstract public class Command extends Configured {
   try {
 PathData[] srcs = PathData.expandAsGlob(src, getConf());
 for (PathData s : srcs) {
-  run(s.path);
+  run(s);
 }
   } catch (IOException e) {
 exitCode = -1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
index 522f701..d8cbfc6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
@@ -189,8 +189,9 @@ public class CacheAdmin extends Configured implements Tool {
 System.err.println("Can't understand argument: " + args.get(0));
 return 1;
   }
-
-  DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+
+  DistributedFileSystem dfs =
+  AdminHelper.getDFS(new Path(path).toUri(), conf);
   CacheDirectiveInfo directive = builder.build();
   EnumSet flags = EnumSet.noneOf(CacheFlag.class);
   if (force) {
@@ -409,7 +410,8 @@ public class CacheAdmin extends Configured implements Tool {
   }
   int exitCode = 0;
   try {
-DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+DistributedFileSystem dfs =
+AdminHelper.getDFS(new Path(path).toUri(), conf);
 RemoteIterator iter =
 dfs.listCacheDirectives(
 new CacheDirectiveInfo.Builder().

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcd03df9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
index 225f11a..4c7335f 100644
--- 
a/hadoop-hdfs-project/

[3/3] hadoop git commit: HDFS-11432. Federation : Support fully qualified path for Quota/Snapshot/cacheadmin/cryptoadmin commands. Contributed by Brahma Reddy Battula.

2017-02-28 Thread brahma
HDFS-11432. Federation : Support fully qualified path for 
Quota/Snapshot/cacheadmin/cryptoadmin commands. Contributed by Brahma Reddy 
Battula.

(cherry picked from commit dcd03df9f9e0080d7e179060ffc8148336c31b3e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31bec2c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31bec2c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31bec2c1

Branch: refs/heads/branch-2.8
Commit: 31bec2c16b9ae6de5354045916aaea71e5d8f6f2
Parents: 265ddb2
Author: Brahma Reddy Battula 
Authored: Wed Mar 1 10:45:56 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Mar 1 10:49:36 2017 +0530

--
 .../org/apache/hadoop/fs/shell/Command.java | 14 -
 .../apache/hadoop/hdfs/tools/CacheAdmin.java|  8 +--
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   | 18 +++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 57 +++-
 .../hdfs/tools/snapshot/SnapshotDiff.java   | 19 ++-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 26 +
 .../hadoop/hdfs/TestSnapshotCommands.java   | 34 
 7 files changed, 136 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31bec2c1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
index fff07aa..cda26e8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
@@ -100,7 +100,17 @@ abstract public class Command extends Configured {
* @throws IOException if any error occurs
*/
   abstract protected void run(Path path) throws IOException;
-  
+
+  /**
+   * Execute the command on the input path data. Commands can override to make
+   * use of the resolved filesystem.
+   * @param pathData The input path with resolved filesystem
+   * @throws IOException
+   */
+  protected void run(PathData pathData) throws IOException {
+run(pathData.path);
+  }
+
   /** 
* For each source path, execute the command
* 
@@ -112,7 +122,7 @@ abstract public class Command extends Configured {
   try {
 PathData[] srcs = PathData.expandAsGlob(src, getConf());
 for (PathData s : srcs) {
-  run(s.path);
+  run(s);
 }
   } catch (IOException e) {
 exitCode = -1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31bec2c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
index 6888ea8..3c17c2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
@@ -185,8 +185,9 @@ public class CacheAdmin extends Configured implements Tool {
 System.err.println("Can't understand argument: " + args.get(0));
 return 1;
   }
-
-  DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+
+  DistributedFileSystem dfs =
+  AdminHelper.getDFS(new Path(path).toUri(), conf);
   CacheDirectiveInfo directive = builder.build();
   EnumSet flags = EnumSet.noneOf(CacheFlag.class);
   if (force) {
@@ -405,7 +406,8 @@ public class CacheAdmin extends Configured implements Tool {
   }
   int exitCode = 0;
   try {
-DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+DistributedFileSystem dfs =
+AdminHelper.getDFS(new Path(path).toUri(), conf);
 RemoteIterator iter =
 dfs.listCacheDirectives(
 new CacheDirectiveInfo.Builder().

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31bec2c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
index b78da31..c2c1363 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
+++ 
b/hadoop-hdfs-project

[2/3] hadoop git commit: HDFS-11432. Federation : Support fully qualified path for Quota/Snapshot/cacheadmin/cryptoadmin commands. Contributed by Brahma Reddy Battula.

2017-02-28 Thread brahma
HDFS-11432. Federation : Support fully qualified path for 
Quota/Snapshot/cacheadmin/cryptoadmin commands. Contributed by Brahma Reddy 
Battula.

(cherry picked from commit dcd03df9f9e0080d7e179060ffc8148336c31b3e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a545ee4d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a545ee4d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a545ee4d

Branch: refs/heads/branch-2
Commit: a545ee4d7f06b4920ed0841651a2eb40016491f4
Parents: c7ddf95
Author: Brahma Reddy Battula 
Authored: Wed Mar 1 10:45:56 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Wed Mar 1 10:47:47 2017 +0530

--
 .../org/apache/hadoop/fs/shell/Command.java | 14 -
 .../apache/hadoop/hdfs/tools/CacheAdmin.java|  8 +--
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   | 18 +++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 57 +++-
 .../hdfs/tools/snapshot/SnapshotDiff.java   | 19 ++-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 26 +
 .../hadoop/hdfs/TestSnapshotCommands.java   | 34 
 7 files changed, 136 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a545ee4d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
index fff07aa..cda26e8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
@@ -100,7 +100,17 @@ abstract public class Command extends Configured {
* @throws IOException if any error occurs
*/
   abstract protected void run(Path path) throws IOException;
-  
+
+  /**
+   * Execute the command on the input path data. Commands can override to make
+   * use of the resolved filesystem.
+   * @param pathData The input path with resolved filesystem
+   * @throws IOException
+   */
+  protected void run(PathData pathData) throws IOException {
+run(pathData.path);
+  }
+
   /** 
* For each source path, execute the command
* 
@@ -112,7 +122,7 @@ abstract public class Command extends Configured {
   try {
 PathData[] srcs = PathData.expandAsGlob(src, getConf());
 for (PathData s : srcs) {
-  run(s.path);
+  run(s);
 }
   } catch (IOException e) {
 exitCode = -1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a545ee4d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
index 522f701..d8cbfc6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
@@ -189,8 +189,9 @@ public class CacheAdmin extends Configured implements Tool {
 System.err.println("Can't understand argument: " + args.get(0));
 return 1;
   }
-
-  DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+
+  DistributedFileSystem dfs =
+  AdminHelper.getDFS(new Path(path).toUri(), conf);
   CacheDirectiveInfo directive = builder.build();
   EnumSet flags = EnumSet.noneOf(CacheFlag.class);
   if (force) {
@@ -409,7 +410,8 @@ public class CacheAdmin extends Configured implements Tool {
   }
   int exitCode = 0;
   try {
-DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+DistributedFileSystem dfs =
+AdminHelper.getDFS(new Path(path).toUri(), conf);
 RemoteIterator iter =
 dfs.listCacheDirectives(
 new CacheDirectiveInfo.Builder().

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a545ee4d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
index 225f11a..4c7335f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
+++ 
b/hadoop-hdfs-project/h

hadoop git commit: HADOOP-14130. Simplify DynamoDBClientFactory for creating Amazon DynamoDB clients. Contributed by Mingliang Liu

2017-02-28 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 32e9a24f9 -> f41d9b024


HADOOP-14130. Simplify DynamoDBClientFactory for creating Amazon DynamoDB 
clients. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f41d9b02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f41d9b02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f41d9b02

Branch: refs/heads/HADOOP-13345
Commit: f41d9b02477be65fc960e3f21cf647acb12ecfbe
Parents: 32e9a24
Author: Mingliang Liu 
Authored: Mon Feb 27 21:30:47 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Feb 28 18:19:02 2017 -0800

--
 .../src/main/resources/core-default.xml |  7 +-
 .../org/apache/hadoop/fs/s3a/Constants.java | 11 +--
 .../fs/s3a/s3guard/DynamoDBClientFactory.java   | 92 ++--
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 55 
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java  | 38 
 .../s3a/s3guard/TestDynamoDBMetadataStore.java  | 44 --
 .../scale/ITestDynamoDBMetadataStoreScale.java  |  2 +-
 7 files changed, 105 insertions(+), 144 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f41d9b02/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ea444d2..486959e 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1291,13 +1291,12 @@
 
 
 
-  fs.s3a.s3guard.ddb.endpoint
+  fs.s3a.s3guard.ddb.region
   
   
-AWS DynamoDB endpoint to connect to. An up-to-date list is
+AWS DynamoDB region to connect to. An up-to-date list is
 provided in the AWS Documentation: regions and endpoints. Without this
-property, the AWS SDK will look up a regional endpoint automatically
-according to the S3 region.
+property, the S3Guard will operate table in the associated S3 bucket 
region.
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f41d9b02/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 6991a13..e14e341 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -333,14 +333,15 @@ public final class Constants {
   "fs.s3a.s3guard.cli.prune.age";
 
   /**
-   * The endpoint of the DynamoDB service.
+   * The region of the DynamoDB service.
*
-   * This config has no default value. If the user does not set this, the AWS
-   * SDK will find the endpoint automatically by the Region.
+   * This config has no default value. If the user does not set this, the
+   * S3Guard will operate table in the associated S3 bucket region.
*/
   @InterfaceStability.Unstable
-  public static final String S3GUARD_DDB_ENDPOINT_KEY =
-  "fs.s3a.s3guard.ddb.endpoint";
+  public static final String S3GUARD_DDB_REGION_KEY =
+  "fs.s3a.s3guard.ddb.region";
+
   /**
* The DynamoDB table name to use.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f41d9b02/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
index 05b96dc..8f1b9e3 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBClientFactory.java
@@ -19,12 +19,12 @@
 package org.apache.hadoop.fs.s3a.s3guard;
 
 import java.io.IOException;
-import java.net.URI;
 
 import com.amazonaws.ClientConfiguration;
 import com.amazonaws.auth.AWSCredentialsProvider;
-import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
-import com.amazonaws.services.s3.model.Region;
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
+import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
+import com.google.common.base.Preconditions;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.Log

hadoop git commit: YARN-6190. Validation and synchronization fixes in LocalityMulticastAMRMProxyPolicy. (Botong Huang via curino)

2017-02-28 Thread curino
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 611a7fec4 -> f743fe342


YARN-6190. Validation and synchronization fixes in 
LocalityMulticastAMRMProxyPolicy. (Botong Huang via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f743fe34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f743fe34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f743fe34

Branch: refs/heads/YARN-2915
Commit: f743fe34290d3e7d11b66096f1a3b90f6a943d90
Parents: 611a7fe
Author: Carlo Curino 
Authored: Tue Feb 28 17:04:20 2017 -0800
Committer: Carlo Curino 
Committed: Tue Feb 28 17:04:20 2017 -0800

--
 .../LocalityMulticastAMRMProxyPolicy.java   | 63 +---
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 21 ++-
 .../policies/manager/BasePolicyManagerTest.java |  3 -
 .../resolver/TestDefaultSubClusterResolver.java |  9 ++-
 .../utils/FederationPoliciesTestUtil.java   |  6 +-
 5 files changed, 73 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f743fe34/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 283f89e..6f97a51 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
 import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
@@ -143,10 +144,9 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 Map newWeightsConverted = new HashMap<>();
 boolean allInactive = true;
 WeightedPolicyInfo policy = getPolicyInfo();
-if (policy.getAMRMPolicyWeights() == null
-|| policy.getAMRMPolicyWeights().size() == 0) {
-  allInactive = false;
-} else {
+
+if (policy.getAMRMPolicyWeights() != null
+&& policy.getAMRMPolicyWeights().size() > 0) {
   for (Map.Entry e : policy.getAMRMPolicyWeights()
   .entrySet()) {
 if (e.getValue() > 0) {
@@ -180,7 +180,6 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 
 this.federationFacade =
 policyContext.getFederationStateStoreFacade();
-this.bookkeeper = new AllocationBookkeeper();
 this.homeSubcluster = policyContext.getHomeSubcluster();
 
   }
@@ -197,7 +196,9 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
   List resourceRequests) throws YarnException {
 
 // object used to accumulate statistics about the answer, initialize with
-// active subclusters.
+// active subclusters. Create a new instance per call because this method
+// can be called concurrently.
+bookkeeper = new AllocationBookkeeper();
 bookkeeper.reinitialize(federationFacade.getSubClusters(true));
 
 List nonLocalizedRequests =
@@ -238,12 +239,16 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 // we log altogether later
   }
   if (targetIds != null && targetIds.size() > 0) {
+boolean hasActive = false;
 for (SubClusterId tid : targetIds) {
   if (bookkeeper.isActiveAndEnabled(tid)) {
 bookkeeper.addRackRR(tid, rr);
+hasActive = true;
   }
 }
-continue;
+if (hasActive) {
+  continue;
+}
   }
 
   // Handle node/rack requests that the SubClusterResolver cannot map to
@@ -347,7 +352,7 

hadoop git commit: HADOOP-12556. KafkaSink jar files are created but not copied to target dist (Babak Behzad via aw)

2017-02-28 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 235203dff -> 989bd56b9


HADOOP-12556. KafkaSink jar files are created but not copied to target dist 
(Babak Behzad via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/989bd56b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/989bd56b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/989bd56b

Branch: refs/heads/trunk
Commit: 989bd56b9fd521a67afbc691c9915be13d7c8e3a
Parents: 235203d
Author: Allen Wittenauer 
Authored: Tue Feb 28 16:15:40 2017 -0800
Committer: Allen Wittenauer 
Committed: Tue Feb 28 16:15:40 2017 -0800

--
 hadoop-tools/hadoop-tools-dist/pom.xml | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/989bd56b/hadoop-tools/hadoop-tools-dist/pom.xml
--
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml 
b/hadoop-tools/hadoop-tools-dist/pom.xml
index 00f07ef..dd28404 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -95,6 +95,12 @@
 
 
   org.apache.hadoop
+  hadoop-kafka
+  compile
+  ${project.version}
+
+
+  org.apache.hadoop
   hadoop-azure
   compile
   ${project.version}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: YARN-6253. FlowAcitivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. Contributed by Haibo Chen.

2017-02-28 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 ab192fd58 -> 34e7c3029
  refs/heads/YARN-5355-branch-2 db7c3f279 -> 57b945581


YARN-6253. FlowAcitivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. 
Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34e7c302
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34e7c302
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34e7c302

Branch: refs/heads/YARN-5355
Commit: 34e7c30293b5a56f9f745769a29c5666bdb85d6c
Parents: ab192fd
Author: Sangjin Lee 
Authored: Tue Feb 28 16:10:25 2017 -0800
Committer: Sangjin Lee 
Committed: Tue Feb 28 16:10:25 2017 -0800

--
 .../timelineservice/storage/flow/FlowActivityColumnPrefix.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34e7c302/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
index 439e0c8..5e7a5d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -271,7 +271,7 @@ public enum FlowActivityColumnPrefix
 byte[] columnQualifier = getColumnPrefixBytes(qualifier);
 Attribute[] combinedAttributes =
 HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-column.store(rowKey, tableMutator, columnQualifier, null, inputValue,
+column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
 combinedAttributes);
   }
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: YARN-6253. FlowAcitivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. Contributed by Haibo Chen.

2017-02-28 Thread sjlee
YARN-6253. FlowAcitivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. 
Contributed by Haibo Chen.

(cherry picked from commit 34e7c30293b5a56f9f745769a29c5666bdb85d6c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57b94558
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57b94558
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57b94558

Branch: refs/heads/YARN-5355-branch-2
Commit: 57b94558160b244ba99434b88c1a786792b725a9
Parents: db7c3f2
Author: Sangjin Lee 
Authored: Tue Feb 28 16:10:25 2017 -0800
Committer: Sangjin Lee 
Committed: Tue Feb 28 16:11:01 2017 -0800

--
 .../timelineservice/storage/flow/FlowActivityColumnPrefix.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57b94558/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
index 439e0c8..5e7a5d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -271,7 +271,7 @@ public enum FlowActivityColumnPrefix
 byte[] columnQualifier = getColumnPrefixBytes(qualifier);
 Attribute[] combinedAttributes =
 HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-column.store(rowKey, tableMutator, columnQualifier, null, inputValue,
+column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
 combinedAttributes);
   }
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: YARN-6192. Move yarn-native-service API records into hadoop-yarn-slider-core module. Contributed by Jian He

2017-02-28 Thread billie
YARN-6192. Move yarn-native-service API records into hadoop-yarn-slider-core 
module. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39ef50cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39ef50cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39ef50cd

Branch: refs/heads/yarn-native-services
Commit: 39ef50cdb71219bec5ffd0355ceb2f95ba1d51fe
Parents: e156133
Author: Billie Rinaldi 
Authored: Tue Feb 28 14:45:20 2017 -0800
Committer: Billie Rinaldi 
Committed: Tue Feb 28 14:45:20 2017 -0800

--
 .../dev-support/findbugs-exclude.xml|   4 +-
 .../yarn/services/api/ApplicationApi.java   |   2 +-
 .../api/impl/ApplicationApiService.java |  20 +-
 .../yarn/services/resource/Application.java | 453 ---
 .../services/resource/ApplicationState.java |  30 --
 .../services/resource/ApplicationStatus.java| 145 --
 .../hadoop/yarn/services/resource/Artifact.java | 157 ---
 .../yarn/services/resource/BaseResource.java|  48 --
 .../yarn/services/resource/Component.java   | 381 
 .../yarn/services/resource/ConfigFile.java  | 192 
 .../yarn/services/resource/Configuration.java   | 149 --
 .../yarn/services/resource/Container.java   | 294 
 .../yarn/services/resource/ContainerState.java  |  25 -
 .../hadoop/yarn/services/resource/Error.java| 125 -
 .../yarn/services/resource/PlacementPolicy.java |  99 
 .../yarn/services/resource/ReadinessCheck.java  | 163 ---
 .../hadoop/yarn/services/resource/Resource.java | 149 --
 .../src/main/webapp/WEB-INF/web.xml |   2 +-
 .../api/impl/TestApplicationApiService.java |   6 +-
 .../hadoop-yarn-slider-core/pom.xml |   5 +
 .../apache/slider/api/resource/Application.java | 449 ++
 .../slider/api/resource/ApplicationState.java   |  30 ++
 .../slider/api/resource/ApplicationStatus.java  | 145 ++
 .../apache/slider/api/resource/Artifact.java| 157 +++
 .../slider/api/resource/BaseResource.java   |  48 ++
 .../apache/slider/api/resource/Component.java   | 381 
 .../apache/slider/api/resource/ConfigFile.java  | 192 
 .../slider/api/resource/Configuration.java  | 149 ++
 .../apache/slider/api/resource/Container.java   | 294 
 .../slider/api/resource/ContainerState.java |  25 +
 .../org/apache/slider/api/resource/Error.java   | 125 +
 .../slider/api/resource/PlacementPolicy.java|  99 
 .../slider/api/resource/ReadinessCheck.java | 163 +++
 .../apache/slider/api/resource/Resource.java| 149 ++
 34 files changed, 2427 insertions(+), 2428 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ef50cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
index 2843338..b89146a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
@@ -16,7 +16,5 @@
limitations under the License.
 -->
 
-
-
-
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ef50cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
index 654413c..0fb6402 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/ApplicationApi.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.services.api;
 
 import javax.ws.rs.core.Response;
 
-import org.apache.hadoop.yarn.services.resource.Application;
+import org.apache.slider.api.resource.Application;
 
 /**
  * Apache Hadoop YARN Services REST

[2/3] hadoop git commit: YARN-6192. Move yarn-native-service API records into hadoop-yarn-slider-core module. Contributed by Jian He

2017-02-28 Thread billie
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ef50cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/PlacementPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/PlacementPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/PlacementPolicy.java
deleted file mode 100644
index 5df00a0..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/PlacementPolicy.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.services.resource;
-
-import io.swagger.annotations.ApiModel;
-import io.swagger.annotations.ApiModelProperty;
-
-import java.io.Serializable;
-import java.util.Objects;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * Placement policy of an instance of an application. This feature is in the
- * works in YARN-4902.
- **/
-
-@ApiModel(description = "Placement policy of an instance of an application. 
This feature is in the works in YARN-4902.")
-@javax.annotation.Generated(value = "class 
io.swagger.codegen.languages.JavaClientCodegen", date = 
"2016-06-02T08:15:05.615-07:00")
-public class PlacementPolicy implements Serializable {
-  private static final long serialVersionUID = 4341110649551172231L;
-
-  private String label = null;
-
-  /**
-   * Assigns an app to a named partition of the cluster where the application
-   * desires to run (optional). If not specified all apps are submitted to a
-   * default label of the app owner. One or more labels can be setup for each
-   * application owner account with required constraints like no-preemption,
-   * sla-9, preemption-ok, etc.
-   **/
-  public PlacementPolicy label(String label) {
-this.label = label;
-return this;
-  }
-
-  @ApiModelProperty(example = "null", value = "Assigns an app to a named 
partition of the cluster where the application desires to run (optional). If 
not specified all apps are submitted to a default label of the app owner. One 
or more labels can be setup for each application owner account with required 
constraints like no-preemption, sla-9, preemption-ok, etc.")
-  @JsonProperty("label")
-  public String getLabel() {
-return label;
-  }
-
-  public void setLabel(String label) {
-this.label = label;
-  }
-
-  @Override
-  public boolean equals(java.lang.Object o) {
-if (this == o) {
-  return true;
-}
-if (o == null || getClass() != o.getClass()) {
-  return false;
-}
-PlacementPolicy placementPolicy = (PlacementPolicy) o;
-return Objects.equals(this.label, placementPolicy.label);
-  }
-
-  @Override
-  public int hashCode() {
-return Objects.hash(label);
-  }
-
-  @Override
-  public String toString() {
-StringBuilder sb = new StringBuilder();
-sb.append("class PlacementPolicy {\n");
-
-sb.append("label: ").append(toIndentedString(label)).append("\n");
-sb.append("}");
-return sb.toString();
-  }
-
-  /**
-   * Convert the given object to string with each line indented by 4 spaces
-   * (except the first line).
-   */
-  private String toIndentedString(java.lang.Object o) {
-if (o == null) {
-  return "null";
-}
return o.toString().replace("\n", "\n    ");
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ef50cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ReadinessCheck.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ReadinessCheck.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache

[1/3] hadoop git commit: YARN-6192. Move yarn-native-service API records into hadoop-yarn-slider-core module. Contributed by Jian He

2017-02-28 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services e1561331c -> 39ef50cdb


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ef50cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Container.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Container.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Container.java
new file mode 100644
index 000..c5dc627
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/resource/Container.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.Date;
+import java.util.Objects;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * An instance of a running application container.
+ **/
+
+@ApiModel(description = "An instance of a running application container")
+@javax.annotation.Generated(value = "class 
io.swagger.codegen.languages.JavaClientCodegen", date = 
"2016-06-02T08:15:05.615-07:00")
+@XmlRootElement
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Container extends BaseResource {
+  private static final long serialVersionUID = -8955788064529288L;
+
+  private String id = null;
+  private Date launchTime = null;
+  private String ip = null;
+  private String hostname = null;
+  private String bareHost = null;
+  private ContainerState state = null;
+  private String componentName = null;
+  private Resource resource = null;
+  private Artifact artifact = null;
+  private Boolean privilegedContainer = null;
+
+  /**
+   * Unique container id of a running application, e.g.
+   * container_e3751_1458061340047_0008_01_02.
+   **/
+  public Container id(String id) {
+this.id = id;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Unique container id of a 
running application, e.g. container_e3751_1458061340047_0008_01_02.")
+  @JsonProperty("id")
+  public String getId() {
+return id;
+  }
+
+  public void setId(String id) {
+this.id = id;
+  }
+
+  /**
+   * The time when the container was created, e.g. 2016-03-16T01:01:49.000Z.
+   * This will most likely be different from cluster launch time.
+   **/
+  public Container launchTime(Date launchTime) {
+this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "The time when the container was 
created, e.g. 2016-03-16T01:01:49.000Z. This will most likely be different from 
cluster launch time.")
+  @JsonProperty("launch_time")
+  public Date getLaunchTime() {
+return launchTime == null ? null : (Date) launchTime.clone();
+  }
+
+  @XmlElement(name = "launch_time")
+  public void setLaunchTime(Date launchTime) {
+this.launchTime = launchTime == null ? null : (Date) launchTime.clone();
+  }
+
+  /**
+   * IP address of a running container, e.g. 172.31.42.141. The IP address and
+   * hostname attribute values are dependent on the cluster/docker network 
setup
+   * as per YARN-4007.
+   **/
+  public Container ip(String ip) {
+this.ip = ip;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "IP address of a running 
container, e.g. 172.31.42.141. The IP address and hostname attribute values are 
dependent on the cluster/docker network setup as per YARN-4007.")
+  @JsonProperty("ip")
+  public String getIp() {
+return ip;
+  }
+
+  public void setIp(String ip) {
+this.ip = ip;
+  }
+
+  /**
+   * Fully qualified hostname of a running container, e.g.
+

hadoop git commit: HADOOP-14129. ITestS3ACredentialsInURL sometimes fails. Contributed by Sean Mackrory

2017-02-28 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 0abbb7029 -> 32e9a24f9


HADOOP-14129. ITestS3ACredentialsInURL sometimes fails. Contributed by Sean 
Mackrory


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32e9a24f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32e9a24f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32e9a24f

Branch: refs/heads/HADOOP-13345
Commit: 32e9a24f905fb766369100e8a78f14434d7c6180
Parents: 0abbb70
Author: Mingliang Liu 
Authored: Tue Feb 28 13:22:27 2017 -0800
Committer: Mingliang Liu 
Committed: Tue Feb 28 13:22:27 2017 -0800

--
 .../java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32e9a24f/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
index f4f7fbb..5a4b2fc 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACredentialsInURL.java
@@ -84,6 +84,11 @@ public class ITestS3ACredentialsInURL extends Assert {
 conf.unset(Constants.ACCESS_KEY);
 conf.unset(Constants.SECRET_KEY);
 fs = S3ATestUtils.createTestFileSystem(conf);
+
+// Skip in the case of S3Guard with DynamoDB because it cannot get
+// credentials for its own use if they're only in S3 URLs
+Assume.assumeFalse(fs.hasMetadataStore());
+
 String fsURI = fs.getUri().toString();
 assertFalse("FS URI contains a @ symbol", fsURI.contains("@"));
 assertFalse("FS URI contains a % symbol", fsURI.contains("%"));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6153. KeepContainer does not work when AM retry window is set. Contributed by kyungwan nam

2017-02-28 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95bd3c3d5 -> c7ddf95d4


YARN-6153. KeepContainer does not work when AM retry window is set. Contributed 
by kyungwan nam

(cherry picked from commit 235203dffda1482fb38762fde544c4dd9c3e1fa8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ddf95d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ddf95d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ddf95d

Branch: refs/heads/branch-2
Commit: c7ddf95d4a7ad3696515ab7abf6ba6d16e43bf3f
Parents: 95bd3c3
Author: Jian He 
Authored: Tue Feb 28 13:23:36 2017 -0800
Committer: Jian He 
Committed: Tue Feb 28 13:26:55 2017 -0800

--
 .../server/resourcemanager/rmapp/RMAppImpl.java | 17 +---
 .../rmapp/attempt/RMAppAttemptImpl.java | 46 +--
 .../yarn/server/resourcemanager/MockRM.java |  6 +-
 .../resourcemanager/TestClientRMService.java|  4 +-
 .../applicationsmanager/TestAMRestart.java  | 84 +---
 .../TestRMAppAttemptImplDiagnostics.java|  2 +-
 .../attempt/TestRMAppAttemptTransitions.java| 19 +++--
 7 files changed, 112 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ddf95d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 7f46eaf..a73cf14 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -926,13 +926,7 @@ public class RMAppImpl implements RMApp, Recoverable {
 }
 RMAppAttempt attempt =
 new RMAppAttemptImpl(appAttemptId, rmContext, scheduler, masterService,
-  submissionContext, conf,
-  // The newly created attempt maybe last attempt if (number of
-  // previously failed attempts(which should not include Preempted,
-  // hardware error and NM resync) + 1) equal to the max-attempt
-  // limit.
-  maxAppAttempts == (getNumFailedAppAttempts() + 1), amReq,
-  currentAMBlacklistManager);
+  submissionContext, conf, amReq, this, currentAMBlacklistManager);
 attempts.put(appAttemptId, attempt);
 currentAttempt = attempt;
   }
@@ -1413,18 +1407,13 @@ public class RMAppImpl implements RMApp, Recoverable {
 };
   }
 
-  private int getNumFailedAppAttempts() {
+  public int getNumFailedAppAttempts() {
 int completedAttempts = 0;
-long endTime = this.systemClock.getTime();
 // Do not count AM preemption, hardware failures or NM resync
 // as attempt failure.
 for (RMAppAttempt attempt : attempts.values()) {
   if (attempt.shouldCountTowardsMaxAttemptRetry()) {
-if (this.attemptFailuresValidityInterval <= 0
-|| (attempt.getFinishTime() > endTime
-- this.attemptFailuresValidityInterval)) {
-  completedAttempts++;
-}
+completedAttempts++;
   }
 }
 return completedAttempts;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ddf95d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index da42ab6..6158f88 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -143,6 +143,7 @@ 

hadoop git commit: YARN-6153. KeepContainer does not work when AM retry window is set. Contributed by kyungwan nam

2017-02-28 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk e0bb867c3 -> 235203dff


YARN-6153. KeepContainer does not work when AM retry window is set. Contributed 
by kyungwan nam


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/235203df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/235203df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/235203df

Branch: refs/heads/trunk
Commit: 235203dffda1482fb38762fde544c4dd9c3e1fa8
Parents: e0bb867
Author: Jian He 
Authored: Tue Feb 28 13:23:36 2017 -0800
Committer: Jian He 
Committed: Tue Feb 28 13:23:36 2017 -0800

--
 .../server/resourcemanager/rmapp/RMAppImpl.java | 17 +---
 .../rmapp/attempt/RMAppAttemptImpl.java | 46 +--
 .../yarn/server/resourcemanager/MockRM.java |  6 +-
 .../resourcemanager/TestClientRMService.java|  4 +-
 .../applicationsmanager/TestAMRestart.java  | 84 +---
 .../TestRMAppAttemptImplDiagnostics.java|  2 +-
 .../attempt/TestRMAppAttemptTransitions.java| 19 +++--
 7 files changed, 112 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/235203df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 516109b..9f00b2e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -994,13 +994,7 @@ public class RMAppImpl implements RMApp, Recoverable {
 }
 RMAppAttempt attempt =
 new RMAppAttemptImpl(appAttemptId, rmContext, scheduler, masterService,
-  submissionContext, conf,
-  // The newly created attempt maybe last attempt if (number of
-  // previously failed attempts(which should not include Preempted,
-  // hardware error and NM resync) + 1) equal to the max-attempt
-  // limit.
-  maxAppAttempts == (getNumFailedAppAttempts() + 1), amReq,
-  currentAMBlacklistManager);
+  submissionContext, conf, amReq, this, currentAMBlacklistManager);
 attempts.put(appAttemptId, attempt);
 currentAttempt = attempt;
   }
@@ -1498,18 +1492,13 @@ public class RMAppImpl implements RMApp, Recoverable {
 };
   }
 
-  private int getNumFailedAppAttempts() {
+  public int getNumFailedAppAttempts() {
 int completedAttempts = 0;
-long endTime = this.systemClock.getTime();
 // Do not count AM preemption, hardware failures or NM resync
 // as attempt failure.
 for (RMAppAttempt attempt : attempts.values()) {
   if (attempt.shouldCountTowardsMaxAttemptRetry()) {
-if (this.attemptFailuresValidityInterval <= 0
-|| (attempt.getFinishTime() > endTime
-- this.attemptFailuresValidityInterval)) {
-  completedAttempts++;
-}
+completedAttempts++;
   }
 }
 return completedAttempts;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/235203df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 25138c5..5c0f48e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -143,6 +143,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
   pri

hadoop git commit: HDFS-11036. Ozone: reuse Xceiver connection. Contributed by Chen Liang.

2017-02-28 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 ad16978e6 -> 00684d62c


HDFS-11036. Ozone: reuse Xceiver connection. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00684d62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00684d62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00684d62

Branch: refs/heads/HDFS-7240
Commit: 00684d62ca7e0f4be3eef05f9634ce4102b33ef3
Parents: ad16978
Author: Anu Engineer 
Authored: Tue Feb 28 12:15:26 2017 -0800
Committer: Anu Engineer 
Committed: Tue Feb 28 12:15:26 2017 -0800

--
 .../org/apache/hadoop/scm/ScmConfigKeys.java|   5 +
 .../apache/hadoop/scm/XceiverClientManager.java | 112 +--
 2 files changed, 106 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00684d62/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
index 44414ea..1e7d994 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
@@ -30,6 +30,11 @@ public final class ScmConfigKeys {
   "dfs.container.ipc";
   public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 50011;
 
+  public static final String SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY =
+  "scm.container.client.idle.threshold";
+  public static final int SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT =
+  1;
+
   // TODO : this is copied from OzoneConsts, may need to move to a better place
   public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00684d62/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
index b9d7765..de706cb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
@@ -19,27 +19,39 @@
 package org.apache.hadoop.scm;
 
 import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.base.Preconditions;
 
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 
+import static 
org.apache.hadoop.scm.ScmConfigKeys.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT;
+import static 
org.apache.hadoop.scm.ScmConfigKeys.SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY;
+
 /**
  * XceiverClientManager is responsible for the lifecycle of XceiverClient
  * instances.  Callers use this class to acquire an XceiverClient instance
  * connected to the desired container pipeline.  When done, the caller also 
uses
  * this class to release the previously acquired XceiverClient instance.
  *
- * This class may evolve to implement efficient lifecycle management policies 
by
- * caching container location information and pooling connected client 
instances
- * for reuse without needing to reestablish a socket connection.  The current
- * implementation simply allocates and closes a new instance every time.
+ *
+ * This class caches connection to container for reuse purpose, such that
+ * accessing same container frequently will be through the same connection
+ * without reestablishing connection. But the connection will be closed if
+ * not being used for a period of time.
  */
 public class XceiverClientManager {
 
   //TODO : change this to SCM configuration class
   private final Configuration conf;
+  private Cache openClient;
+  private final long staleThresholdMs;
 
   /**
* Creates a new XceiverClientManager.
@@ -48,13 +60,38 @@ public class XceiverClientManager {
*/
   public XceiverClientManager(Configuration conf) {
 Preconditions.checkNotNull(conf);
+this.staleThresholdMs = conf.getTimeDuration(
+SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY,
+SCM_CONT

hadoop git commit: YARN-6189: Improve application status log message when RM restarted when app is in NEW state. Contributed by Junping Du

2017-02-28 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 df35ba81f -> 95bd3c3d5


YARN-6189: Improve application status log message when RM restarted when
app is in NEW state. Contributed by Junping Du

(cherry picked from commit e0bb867c3fa638c9f689ee0b044b400481cf02b5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95bd3c3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95bd3c3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95bd3c3d

Branch: refs/heads/branch-2
Commit: 95bd3c3d55625c732299dbf35d32f7690caee829
Parents: df35ba8
Author: Xuan 
Authored: Tue Feb 28 11:04:56 2017 -0800
Committer: Xuan 
Committed: Tue Feb 28 11:06:07 2017 -0800

--
 .../yarn/server/resourcemanager/ClientRMService.java | 15 ++-
 .../server/resourcemanager/TestClientRMService.java  |  3 ++-
 2 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bd3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 10745a9..e468813 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -357,7 +357,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '"
-  + applicationId + "' doesn't exist in RM.");
+  + applicationId + "' doesn't exist in RM. Please check "
+  + "that the job submission was successful.");
 }
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
@@ -391,7 +392,8 @@ public class ClientRMService extends AbstractService 
implements
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '"
   + request.getApplicationAttemptId().getApplicationId()
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job "
+  + "submission was successful.");
 }
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
@@ -430,7 +432,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -478,7 +481,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -528,7 +532,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95bd3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server

hadoop git commit: YARN-6189: Improve application status log message when RM is restarted while app is in NEW state. Contributed by Junping Du

2017-02-28 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk eac6b4c35 -> e0bb867c3


YARN-6189: Improve application status log message when RM is restarted while
app is in NEW state. Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0bb867c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0bb867c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0bb867c

Branch: refs/heads/trunk
Commit: e0bb867c3fa638c9f689ee0b044b400481cf02b5
Parents: eac6b4c
Author: Xuan 
Authored: Tue Feb 28 11:04:56 2017 -0800
Committer: Xuan 
Committed: Tue Feb 28 11:04:56 2017 -0800

--
 .../yarn/server/resourcemanager/ClientRMService.java | 15 ++-
 .../server/resourcemanager/TestClientRMService.java  |  3 ++-
 2 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0bb867c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 48bccfb..929a9e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -359,7 +359,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '"
-  + applicationId + "' doesn't exist in RM.");
+  + applicationId + "' doesn't exist in RM. Please check "
+  + "that the job submission was successful.");
 }
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
@@ -393,7 +394,8 @@ public class ClientRMService extends AbstractService 
implements
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '"
   + request.getApplicationAttemptId().getApplicationId()
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job "
+  + "submission was successful.");
 }
 
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
@@ -432,7 +434,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -480,7 +483,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);
@@ -530,7 +534,8 @@ public class ClientRMService extends AbstractService 
implements
   // If the RM doesn't have the application, throw
   // ApplicationNotFoundException and let client to handle.
   throw new ApplicationNotFoundException("Application with id '" + appId
-  + "' doesn't exist in RM.");
+  + "' doesn't exist in RM. Please check that the job submission "
+  + "was successful.");
 }
 boolean allowAccess = checkAccess(callerUGI, application.getUser(),
 ApplicationAccessType.VIEW_APP, application);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0bb867c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager

[2/2] hadoop git commit: YARN-6216. Unify Container Resizing code paths with Container Updates making it scheduler agnostic. (Arun Suresh via wangda)

2017-02-28 Thread wangda
YARN-6216. Unify Container Resizing code paths with Container Updates making it 
scheduler agnostic. (Arun Suresh via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eac6b4c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eac6b4c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eac6b4c3

Branch: refs/heads/trunk
Commit: eac6b4c35c50e555c2f1b5f913bb2c4d839f1ff4
Parents: 480b4dd
Author: Wangda Tan 
Authored: Tue Feb 28 10:35:50 2017 -0800
Committer: Wangda Tan 
Committed: Tue Feb 28 10:35:50 2017 -0800

--
 .../sls/scheduler/ResourceSchedulerWrapper.java |   8 -
 .../server/scheduler/SchedulerRequestKey.java   |  12 +-
 .../server/resourcemanager/RMServerUtils.java   |  27 +-
 .../rmcontainer/RMContainer.java|   4 -
 .../RMContainerChangeResourceEvent.java |  44 ---
 .../rmcontainer/RMContainerImpl.java|  46 ---
 .../scheduler/AbstractYarnScheduler.java| 171 +++---
 .../scheduler/AppSchedulingInfo.java| 283 +---
 .../scheduler/ContainerUpdateContext.java   | 193 ---
 .../scheduler/SchedulerApplicationAttempt.java  | 212 
 .../scheduler/SchedulerNode.java|  44 ---
 .../scheduler/capacity/AbstractCSQueue.java |  13 +-
 .../scheduler/capacity/CSQueue.java |  15 -
 .../scheduler/capacity/CapacityScheduler.java   | 121 +--
 .../scheduler/capacity/LeafQueue.java   | 152 +
 .../scheduler/capacity/ParentQueue.java |  53 +--
 .../capacity/allocator/ContainerAllocator.java  |  31 +-
 .../allocator/IncreaseContainerAllocator.java   | 337 ---
 .../common/ContainerAllocationProposal.java |   9 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java | 245 +++---
 .../common/fica/FiCaSchedulerNode.java  |  14 -
 .../scheduler/fair/FairScheduler.java   |  11 +-
 .../scheduler/fifo/FifoScheduler.java   |   8 -
 .../scheduler/capacity/TestChildQueueOrder.java |   4 +-
 .../capacity/TestContainerResizing.java | 134 +---
 .../capacity/TestIncreaseAllocationExpirer.java |  12 +-
 .../scheduler/capacity/TestLeafQueue.java   |   4 +-
 .../scheduler/capacity/TestParentQueue.java |   4 +-
 28 files changed, 482 insertions(+), 1729 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eac6b4c3/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
index 5517362..df8323a 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
@@ -969,12 +969,4 @@ final public class ResourceSchedulerWrapper
 return Priority.newInstance(0);
   }
 
-  @Override
-  protected void decreaseContainer(
-  SchedContainerChangeRequest decreaseRequest,
-  SchedulerApplicationAttempt attempt) {
-// TODO Auto-generated method stub
-
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eac6b4c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
index 02539ba..c4f37f6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/scheduler/SchedulerRequestKey.java
@@ -116,7 +116,17 @@ public final class SchedulerRequestKey implements
 if (priorityCompare != 0) {
   return priorityCompare;
 }
-return Long.compare(allocationRequestId, o.getAllocationRequestId());
+int allocReqCompare = Long.compare(
+allocationRequestId, o.getAllocationRequestId());
+
+if (allocReqCompare != 0) {
+  return allocReqCompare;
+}
+
+if (this.containerToUpdate != null && o.containerToUpdate != null) {
+  r

[1/2] hadoop git commit: YARN-6216. Unify Container Resizing code paths with Container Updates making it scheduler agnostic. (Arun Suresh via wangda)

2017-02-28 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 480b4dd57 -> eac6b4c35


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eac6b4c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index b65f16a..1b20556 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -1134,12 +1134,7 @@ public class LeafQueue extends AbstractCSQueue {
 
 if (targetLeafQueue == this) {
   // When trying to preempt containers from the same queue
-  if (rmContainer.hasIncreaseReservation()) {
-// Increased container reservation
-unreserveIncreasedContainer(clusterResource,
-schedulerContainer.getSchedulerApplicationAttempt(),
-schedulerContainer.getSchedulerNode(), rmContainer);
-  } else if (rmContainer.getState() == RMContainerState.RESERVED) {
+  if (rmContainer.getState() == RMContainerState.RESERVED) {
 // For other reserved containers
 // This is a reservation exchange, complete previous reserved container
 completedContainer(clusterResource,
@@ -1212,8 +1207,7 @@ public class LeafQueue extends AbstractCSQueue {
   schedulerContainer.getSchedulerApplicationAttempt(),
   allocation.getAllocatedOrReservedResource(),
   schedulerContainer.getNodePartition(),
-  schedulerContainer.getRmContainer(),
-  allocation.isIncreasedAllocation());
+  schedulerContainer.getRmContainer());
   orderingPolicy.containerAllocated(
   schedulerContainer.getSchedulerApplicationAttempt(),
   schedulerContainer.getRmContainer());
@@ -1446,40 +1440,6 @@ public class LeafQueue extends AbstractCSQueue {
   readLock.unlock();
 }
   }
-  
-  @Override
-  public void unreserveIncreasedContainer(Resource clusterResource,
-  FiCaSchedulerApp app, FiCaSchedulerNode node, RMContainer rmContainer) {
-boolean removed = false;
-Priority priority = null;
-
-try {
-  writeLock.lock();
-  if (rmContainer.getContainer() != null) {
-priority = rmContainer.getContainer().getPriority();
-  }
-
-  if (null != priority) {
-removed = app.unreserve(rmContainer.getAllocatedSchedulerKey(), node,
-rmContainer);
-  }
-
-  if (removed) {
-// Inform the ordering policy
-orderingPolicy.containerReleased(app, rmContainer);
-
-releaseResource(clusterResource, app, 
rmContainer.getReservedResource(),
-node.getPartition(), rmContainer, true);
-  }
-} finally {
-  writeLock.unlock();
-}
-
-if (removed) {
-  getParent().unreserveIncreasedContainer(clusterResource, app, node,
-  rmContainer);
-}
-  }
 
   private void updateSchedulerHealthForCompletedContainer(
   RMContainer rmContainer, ContainerStatus containerStatus) {
@@ -1538,16 +1498,6 @@ public class LeafQueue extends AbstractCSQueue {
 updateSchedulerHealthForCompletedContainer(rmContainer, containerStatus);
 
 if (application != null) {
-  // unreserve container increase request if it previously reserved.
-  if (rmContainer.hasIncreaseReservation()) {
-unreserveIncreasedContainer(clusterResource, application, node,
-rmContainer);
-  }
-  
-  // Remove container increase request if it exists
-  application.removeIncreaseRequest(node.getNodeID(),
-  rmContainer.getAllocatedSchedulerKey(), 
rmContainer.getContainerId());
-
   boolean removed = false;
 
   // Careful! Locking order is important!
@@ -1576,7 +1526,7 @@ public class LeafQueue extends AbstractCSQueue {
   orderingPolicy.containerReleased(application, rmContainer);
 
   releaseResource(clusterResource, application, 
container.getResource(),
-  node.getPartition(), rmContainer, false);
+  node.getPartition(), rmContainer);
 }
   } finally {
 writeLock.unlock();
@@ -1597,12 +1547,10 @@ public class LeafQueue extends AbstractCSQueue {
 
   void allocateResource(Resource clusterResource,
   SchedulerApplicationAt

hadoop git commit: HDFS-11414. Ozone : move StorageContainerLocation protocol to hdfs-client. Contributed by Chen Liang.

2017-02-28 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 d63ec0ca8 -> ad16978e6


HDFS-11414. Ozone : move StorageContainerLocation protocol to hdfs-client. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad16978e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad16978e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad16978e

Branch: refs/heads/HDFS-7240
Commit: ad16978e65df67cf735b6255f4f062dac0ee1523
Parents: d63ec0c
Author: Arpit Agarwal 
Authored: Tue Feb 28 10:28:05 2017 -0800
Committer: Arpit Agarwal 
Committed: Tue Feb 28 10:28:05 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   1 +
 .../hadoop/scm/protocol/LocatedContainer.java   | 127 +
 .../StorageContainerLocationProtocol.java   |  52 +++
 .../hadoop/scm/protocol/package-info.java   |  19 +++
 ...rLocationProtocolClientSideTranslatorPB.java | 141 +++
 .../StorageContainerLocationProtocolPB.java |  34 +
 .../hadoop/scm/protocolPB/package-info.java |  24 
 .../StorageContainerLocationProtocol.proto  |  99 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 -
 .../server/datanode/ObjectStoreHandler.java |   4 +-
 .../hadoop/ozone/protocol/LocatedContainer.java | 127 -
 .../StorageContainerLocationProtocol.java   |  54 ---
 ...rLocationProtocolClientSideTranslatorPB.java | 141 ---
 .../StorageContainerLocationProtocolPB.java |  34 -
 ...rLocationProtocolServerSideTranslatorPB.java |   5 +-
 .../ozone/scm/StorageContainerManager.java  |   6 +-
 .../web/storage/DistributedStorageHandler.java  |   4 +-
 .../StorageContainerLocationProtocol.proto  |  99 -
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   4 +-
 .../ozone/TestStorageContainerManager.java  |   4 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java |   3 +-
 21 files changed, 512 insertions(+), 471 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad16978e/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 9c8dd1b..31e6408 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -165,6 +165,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
   inotify.proto
   erasurecoding.proto
   ReconfigurationProtocol.proto
+  StorageContainerLocationProtocol.proto
   DatanodeContainerProtocol.proto
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad16978e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/LocatedContainer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/LocatedContainer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/LocatedContainer.java
new file mode 100644
index 000..6e89d0c
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/LocatedContainer.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.scm.protocol;
+
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+/**
+ * Holds the nodes that currently host the container for an object key hash.
+ */
+@InterfaceAudience.Private
+public final class LocatedContainer {
+  private final String key;
+  private final String matchedKeyPrefix;
+  private final String containerName;
+  private final Set locations;
+  private final DatanodeInfo leader;
+
+  /**
+   * Creates a LocatedContainer

hadoop git commit: YARN-1728. Workaround guice3x-undecoded pathInfo in YARN WebApp. (Yuanbo Liu via gera)

2017-02-28 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 33f426334 -> c8b1112ed


YARN-1728. Workaround guice3x-undecoded pathInfo in YARN WebApp. (Yuanbo Liu 
via gera)

(cherry picked from commit df35ba81fe26f526a1534b72089fbb310efccdd9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8b1112e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8b1112e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8b1112e

Branch: refs/heads/branch-2.7
Commit: c8b1112ed977408cddea5b645ce7dfd2fe7ded51
Parents: 33f4263
Author: Gera Shegalov 
Authored: Tue Feb 28 09:41:54 2017 -0800
Committer: Gera Shegalov 
Committed: Tue Feb 28 10:07:18 2017 -0800

--
 .../apache/hadoop/yarn/webapp/Dispatcher.java   | 11 +++
 .../apache/hadoop/yarn/webapp/TestWebApp.java   | 30 
 2 files changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8b1112e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
index 66dd21b..2f49922 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.webapp;
 import static com.google.common.base.Preconditions.checkState;
 
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Timer;
 import java.util.TimerTask;
 
@@ -115,6 +117,15 @@ public class Dispatcher extends HttpServlet {
 if (pathInfo == null) {
   pathInfo = "/";
 }
+// The implementation class of HttpServletRequest in
+// Guice-3.0 does not decode paths that are encoded,
+// decode path info here for further operation.
+try {
+  pathInfo = new URI(pathInfo).getPath();
+}  catch (URISyntaxException ex) {
+  // Just leave it alone for compatibility.
+  LOG.error(pathInfo + ": Failed to decode path.", ex);
+}
 Controller.RequestContext rc =
 injector.getInstance(Controller.RequestContext.class);
 if (setCookieParams(rc, req) > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8b1112e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
index 6eaeb2b..99677df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
@@ -33,6 +33,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.net.URLEncoder;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.yarn.MockApps;
@@ -260,6 +261,35 @@ public class TestWebApp {
 }
   }
 
+  @Test public void testEncodedUrl() throws Exception {
+WebApp app =
+WebApps.$for("test", TestWebApp.class, this, "ws").start(new WebApp() {
+  @Override
+  public void setup() {
+bind(MyTestJAXBContextResolver.class);
+bind(MyTestWebService.class);
+
+route("/:foo", FooController.class);
+  }
+});
+String baseUrl = baseUrl(app);
+
+try {
+  // Test encoded url
+  String rawPath = "localhost:8080";
+  String encodedUrl = baseUrl + "test/" +
+  URLEncoder.encode(rawPath, "UTF-8");
+  assertEquals("foo" + rawPath, getContent(encodedUrl).trim());
+
+  rawPath = "@;%$";
+  encodedUrl = baseUrl + "test/" +
+  URLEncoder.encode(rawPath, "UTF-8");
+  assertEquals("foo" + rawPath, getContent(encodedUrl).trim());
+} finally {
+  app.stop();
+}
+  }
+
   // This is to test the GuiceFilter should only be applied to webAppContext,
   // not to staticContext  and logContext;
   @Test public void testYARNWebAppContext() throws Exception {


-
To unsubscribe, e-mail: common-commits-unsubscr

hadoop git commit: YARN-1728. Workaround guice3x-undecoded pathInfo in YARN WebApp. (Yuanbo Liu via gera)

2017-02-28 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5fb785e15 -> df35ba81f


YARN-1728. Workaround guice3x-undecoded pathInfo in YARN WebApp. (Yuanbo Liu 
via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df35ba81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df35ba81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df35ba81

Branch: refs/heads/branch-2
Commit: df35ba81fe26f526a1534b72089fbb310efccdd9
Parents: 5fb785e
Author: Gera Shegalov 
Authored: Tue Feb 28 09:41:54 2017 -0800
Committer: Gera Shegalov 
Committed: Tue Feb 28 09:46:57 2017 -0800

--
 .../apache/hadoop/yarn/webapp/Dispatcher.java   | 11 +++
 .../apache/hadoop/yarn/webapp/TestWebApp.java   | 31 +++-
 2 files changed, 41 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df35ba81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
index d519dbb..eed077a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.webapp;
 import static com.google.common.base.Preconditions.checkState;
 
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Timer;
 import java.util.TimerTask;
 
@@ -116,6 +118,15 @@ public class Dispatcher extends HttpServlet {
 if (pathInfo == null) {
   pathInfo = "/";
 }
+// The implementation class of HttpServletRequest in
+// Guice-3.0 does not decode paths that are encoded,
+// decode path info here for further operation.
+try {
+  pathInfo = new URI(pathInfo).getPath();
+}  catch (URISyntaxException ex) {
+  // Just leave it alone for compatibility.
+  LOG.error(pathInfo + ": Failed to decode path.", ex);
+}
 Controller.RequestContext rc =
 injector.getInstance(Controller.RequestContext.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df35ba81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
index 9454002..db50dd3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
@@ -27,12 +27,12 @@ import static 
org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.net.URLEncoder;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -263,6 +263,35 @@ public class TestWebApp {
 }
   }
 
+  @Test public void testEncodedUrl() throws Exception {
+WebApp app =
+WebApps.$for("test", TestWebApp.class, this, "ws").start(new WebApp() {
+  @Override
+  public void setup() {
+bind(MyTestJAXBContextResolver.class);
+bind(MyTestWebService.class);
+
+route("/:foo", FooController.class);
+  }
+});
+String baseUrl = baseUrl(app);
+
+try {
+  // Test encoded url
+  String rawPath = "localhost:8080";
+  String encodedUrl = baseUrl + "test/" +
+  URLEncoder.encode(rawPath, "UTF-8");
+  assertEquals("foo" + rawPath, getContent(encodedUrl).trim());
+
+  rawPath = "@;%$";
+  encodedUrl = baseUrl + "test/" +
+  URLEncoder.encode(rawPath, "UTF-8");
+  assertEquals("foo" + rawPath, getContent(encodedUrl).trim());
+} finally {
+  app.stop();
+}
+  }
+
   @Test public void testRobotsText() throws Exception {
 WebApp app =
 WebApps.$for("test

hadoop git commit: YARN-1728. Regression test for guice-undecoded pathInfo in YARN WebApp. (Yuanbo Liu via gera)

2017-02-28 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/trunk d269b488a -> 480b4dd57


YARN-1728. Regression test for guice-undecoded pathInfo in YARN WebApp. (Yuanbo 
Liu via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/480b4dd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/480b4dd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/480b4dd5

Branch: refs/heads/trunk
Commit: 480b4dd574d0355bf6c976a38bb45cb86adb2714
Parents: d269b48
Author: Gera Shegalov 
Authored: Tue Feb 28 09:22:29 2017 -0800
Committer: Gera Shegalov 
Committed: Tue Feb 28 09:27:22 2017 -0800

--
 .../apache/hadoop/yarn/webapp/TestWebApp.java   | 31 +++-
 1 file changed, 30 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/480b4dd5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
index 9454002..db50dd3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java
@@ -27,12 +27,12 @@ import static 
org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.InputStream;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.net.URLEncoder;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -263,6 +263,35 @@ public class TestWebApp {
 }
   }
 
+  @Test public void testEncodedUrl() throws Exception {
+WebApp app =
+WebApps.$for("test", TestWebApp.class, this, "ws").start(new WebApp() {
+  @Override
+  public void setup() {
+bind(MyTestJAXBContextResolver.class);
+bind(MyTestWebService.class);
+
+route("/:foo", FooController.class);
+  }
+});
+String baseUrl = baseUrl(app);
+
+try {
+  // Test encoded url
+  String rawPath = "localhost:8080";
+  String encodedUrl = baseUrl + "test/" +
+  URLEncoder.encode(rawPath, "UTF-8");
+  assertEquals("foo" + rawPath, getContent(encodedUrl).trim());
+
+  rawPath = "@;%$";
+  encodedUrl = baseUrl + "test/" +
+  URLEncoder.encode(rawPath, "UTF-8");
+  assertEquals("foo" + rawPath, getContent(encodedUrl).trim());
+} finally {
+  app.stop();
+}
+  }
+
   @Test public void testRobotsText() throws Exception {
 WebApp app =
 WebApps.$for("test", TestWebApp.class, this, "ws").start(new WebApp() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default from 1000ms to 5000ms. Contributed by Andrew Wang.

2017-02-28 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 58eefc71c -> 5fb785e15


HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default from 
1000ms to 5000ms. Contributed by Andrew Wang.

(cherry picked from commit d269b488a71a158d3ddcbdea96992abe29725c69)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fb785e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fb785e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fb785e1

Branch: refs/heads/branch-2
Commit: 5fb785e154b429beb0b2e392c39be91dae65bb5c
Parents: 58eefc7
Author: Zhe Zhang 
Authored: Tue Feb 28 09:00:49 2017 -0800
Committer: Zhe Zhang 
Committed: Tue Feb 28 09:02:59 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java| 2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb785e1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 458f47c..853306b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -399,7 +399,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // event to be logged
   public static final String  
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.write-lock-reporting-threshold-ms";
-  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 1000L;
+  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
   public static final String  
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.read-lock-reporting-threshold-ms";
   public static final long
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fb785e1/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 281753b..4ea0e69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2657,7 +2657,7 @@
 
 
   dfs.namenode.write-lock-reporting-threshold-ms
-  1000
+  5000
   When a write lock is held on the namenode for a long time,
 this will be logged as the lock is released. This sets how long the
 lock must be held for logging to occur.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default from 1000ms to 5000ms. Contributed by Andrew Wang.

2017-02-28 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c2a4ce771 -> 33f426334


HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default from 
1000ms to 5000ms. Contributed by Andrew Wang.

(cherry picked from commit d269b488a71a158d3ddcbdea96992abe29725c69)
(cherry picked from commit 5fb785e154b429beb0b2e392c39be91dae65bb5c)
(cherry picked from commit 265ddb20c3aa99c2d91e2a85e82106d4960f5a7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33f42633
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33f42633
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33f42633

Branch: refs/heads/branch-2.7
Commit: 33f4263345087d0b133ee3aba86b0ddfe239d81b
Parents: c2a4ce7
Author: Zhe Zhang 
Authored: Tue Feb 28 09:00:49 2017 -0800
Committer: Zhe Zhang 
Committed: Tue Feb 28 09:07:23 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java   | 2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml   | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33f42633/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c046a18..3234fc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -72,6 +72,9 @@ Release 2.7.4 - UNRELEASED
 
 HDFS-11333. Print a user friendly error message when plugins are not 
found. (Wei-Chiu Chuang)
 
+HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default
+from 1000ms to 5000ms. (wang via zhz)
+
   OPTIMIZATIONS
 
 HDFS-10896. Move lock logging logic from FSNamesystem into 
FSNamesystemLock.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33f42633/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 83f882b..64186a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -385,7 +385,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // event to be logged
   public static final String  
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.write-lock-reporting-threshold-ms";
-  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 1000L;
+  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
   public static final String  
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.read-lock-reporting-threshold-ms";
   public static final long
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33f42633/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 6f046d0..40fcce8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2338,7 +2338,7 @@
 
 
   dfs.namenode.write-lock-reporting-threshold-ms
-  1000
+  5000
   When a write lock is held on the namenode for a long time,
 this will be logged as the lock is released. This sets how long the
 lock must be held for logging to occur.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default from 1000ms to 5000ms. Contributed by Andrew Wang.

2017-02-28 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 6833b4ea3 -> 265ddb20c


HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default from 
1000ms to 5000ms. Contributed by Andrew Wang.

(cherry picked from commit d269b488a71a158d3ddcbdea96992abe29725c69)
(cherry picked from commit 5fb785e154b429beb0b2e392c39be91dae65bb5c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/265ddb20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/265ddb20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/265ddb20

Branch: refs/heads/branch-2.8
Commit: 265ddb20c3aa99c2d91e2a85e82106d4960f5a7b
Parents: 6833b4e
Author: Zhe Zhang 
Authored: Tue Feb 28 09:00:49 2017 -0800
Committer: Zhe Zhang 
Committed: Tue Feb 28 09:04:07 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java| 2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/265ddb20/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f28f9d3..2cb2823 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -390,7 +390,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // event to be logged
   public static final String  
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.write-lock-reporting-threshold-ms";
-  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 1000L;
+  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
   public static final String  
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.read-lock-reporting-threshold-ms";
   public static final long
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/265ddb20/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index ab8418f..c314132 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2624,7 +2624,7 @@
 
 
   dfs.namenode.write-lock-reporting-threshold-ms
-  1000
+  5000
   When a write lock is held on the namenode for a long time,
 this will be logged as the lock is released. This sets how long the
 lock must be held for logging to occur.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default from 1000ms to 5000ms. Contributed by Andrew Wang.

2017-02-28 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0f35443bf -> d269b488a


HDFS-11466. Change dfs.namenode.write-lock-reporting-threshold-ms default from 
1000ms to 5000ms. Contributed by Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d269b488
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d269b488
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d269b488

Branch: refs/heads/trunk
Commit: d269b488a71a158d3ddcbdea96992abe29725c69
Parents: 0f35443
Author: Zhe Zhang 
Authored: Tue Feb 28 09:00:49 2017 -0800
Committer: Zhe Zhang 
Committed: Tue Feb 28 09:00:49 2017 -0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java| 2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d269b488/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cfd16aa..68cef36 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -425,7 +425,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // event to be logged
   public static final String  
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.write-lock-reporting-threshold-ms";
-  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 1000L;
+  public static final long
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
   public static final String  
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
   "dfs.namenode.read-lock-reporting-threshold-ms";
   public static final long
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d269b488/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d23b967..c220025 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2720,7 +2720,7 @@
 
 
   dfs.namenode.write-lock-reporting-threshold-ms
-  1000
+  5000
   When a write lock is held on the namenode for a long time,
 this will be logged as the lock is released. This sets how long the
 lock must be held for logging to occur.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6239. Fix javadoc warnings in YARN that caused by deprecated FileSystem APIs. Contributed by Yiqun Lin.

2017-02-28 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk ef488044d -> 0f35443bf


YARN-6239. Fix javadoc warnings in YARN that caused by deprecated FileSystem 
APIs. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f35443b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f35443b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f35443b

Branch: refs/heads/trunk
Commit: 0f35443bf262bf2b92ca01cc76178aeb32533ae2
Parents: ef48804
Author: Yiqun Lin 
Authored: Tue Feb 28 18:51:56 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 28 18:51:56 2017 +0800

--
 .../TestFileSystemApplicationHistoryStore.java| 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f35443b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index bd6bea3..15a00d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -35,6 +35,7 @@ import static org.mockito.Mockito.verify;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
@@ -49,6 +50,7 @@ import 
org.apache.hadoop.yarn.server.applicationhistoryservice.records.Container
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestFileSystemApplicationHistoryStore extends
 ApplicationHistoryStoreTestUtils {
@@ -271,7 +273,9 @@ public class TestFileSystemApplicationHistoryStore extends
 
 // Setup file system to inject startup conditions
 FileSystem fs = spy(new RawLocalFileSystem());
-doReturn(true).when(fs).isDirectory(any(Path.class));
+FileStatus fileStatus = Mockito.mock(FileStatus.class);
+doReturn(true).when(fileStatus).isDirectory();
+doReturn(fileStatus).when(fs).getFileStatus(any(Path.class));
 
 try {
   initAndStartStore(fs);
@@ -280,7 +284,7 @@ public class TestFileSystemApplicationHistoryStore extends
 }
 
 // Make sure that directory creation was not attempted
-verify(fs, never()).isDirectory(any(Path.class));
+verify(fileStatus, never()).isDirectory();
 verify(fs, times(1)).mkdirs(any(Path.class));
   }
 
@@ -291,7 +295,9 @@ public class TestFileSystemApplicationHistoryStore extends
 
 // Setup file system to inject startup conditions
 FileSystem fs = spy(new RawLocalFileSystem());
-doReturn(false).when(fs).isDirectory(any(Path.class));
+FileStatus fileStatus = Mockito.mock(FileStatus.class);
+doReturn(false).when(fileStatus).isDirectory();
+doReturn(fileStatus).when(fs).getFileStatus(any(Path.class));
 doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
 
 try {
@@ -302,7 +308,7 @@ public class TestFileSystemApplicationHistoryStore extends
 }
 
 // Make sure that directory creation was attempted
-verify(fs, never()).isDirectory(any(Path.class));
+verify(fileStatus, never()).isDirectory();
 verify(fs, times(1)).mkdirs(any(Path.class));
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5335. Use em-table in app/nodes pages for new YARN UI. Contributed by Sunil G.

2017-02-28 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk f187d6381 -> ef488044d


YARN-5335. Use em-table in app/nodes pages for new YARN UI. Contributed by 
Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef488044
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef488044
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef488044

Branch: refs/heads/trunk
Commit: ef488044d08983e84235e8da1cf3a7fdacadd70d
Parents: f187d63
Author: Sunil G 
Authored: Tue Feb 28 13:49:19 2017 +0530
Committer: Sunil G 
Committed: Tue Feb 28 13:49:19 2017 +0530

--
 .../src/main/webapp/app/adapters/yarn-app.js|   4 +-
 .../app/components/app-usage-donut-chart.js |   1 -
 .../main/webapp/app/components/donut-chart.js   |   1 -
 .../webapp/app/controllers/app-table-columns.js |  98 +
 .../webapp/app/controllers/yarn-apps/apps.js|  25 +
 .../app/controllers/yarn-apps/services.js   |  25 +
 .../main/webapp/app/controllers/yarn-node.js|   3 +-
 .../webapp/app/controllers/yarn-nodes/table.js  | 109 +++
 .../webapp/app/controllers/yarn-queue/apps.js   |  31 ++
 .../src/main/webapp/app/models/yarn-rm-node.js  |   8 ++
 .../src/main/webapp/app/routes/yarn-apps.js |   2 -
 .../main/webapp/app/routes/yarn-apps/apps.js|   9 ++
 .../webapp/app/routes/yarn-apps/services.js |  11 ++
 .../src/main/webapp/app/routes/yarn-queue.js|   4 +-
 .../src/main/webapp/app/routes/yarn-queues.js   |   3 +-
 .../src/main/webapp/app/serializers/yarn-app.js |   3 +-
 .../webapp/app/templates/yarn-apps/apps.hbs |  13 ++-
 .../webapp/app/templates/yarn-apps/services.hbs |   4 +-
 .../webapp/app/templates/yarn-nodes/table.hbs   |  42 +--
 .../webapp/app/templates/yarn-queue/apps.hbs|   3 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |   9 +-
 .../unit/controllers/app-table-columns-test.js  |  30 +
 .../unit/controllers/yarn-apps/apps-test.js |  30 +
 .../unit/controllers/yarn-apps/services-test.js |  30 +
 .../unit/controllers/yarn-nodes/table-test.js   |  30 +
 .../unit/controllers/yarn-queue/apps-test.js|  30 +
 26 files changed, 492 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef488044/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
index b34c606..fc52f7c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-app.js
@@ -25,9 +25,7 @@ export default AbstractAdapter.extend({
 
   urlForQuery(query/*, modelName*/) {
 var url = this._buildURL();
-if (query.state) {
-  url = url + '/apps/?state=' + query.state;
-}
+url = url + '/apps';
 return url;
   },
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef488044/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
index 274c219..c72d934 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/app-usage-donut-chart.js
@@ -36,7 +36,6 @@ export default BaseUsageDonutChart.extend({
   value: v.toFixed(2)
 });
 
-console.log(v);
 avail = avail - v;
   }
 }.bind(this));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef488044/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
index e5699b4..82d2d46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/donut-chart.js
@@ -54,7 +54,6 @@ export default BaseChartComponent.extend({
 // 50 is for title
 var outerRadius = (h - 50 - 2 * layout.margin