hadoop git commit: HADOOP-15328. Fix the typo in HttpAuthentication.md. Contributed by fang zhenyi

2018-04-09 Thread bharat
Repository: hadoop
Updated Branches:
  refs/heads/trunk 907919d28 -> 0006346ab


HADOOP-15328. Fix the typo in HttpAuthentication.md. Contributed by fang zhenyi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0006346a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0006346a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0006346a

Branch: refs/heads/trunk
Commit: 0006346abe209a07d149fe5fd5a25cda0af26e07
Parents: 907919d
Author: Bharat 
Authored: Mon Apr 9 16:37:49 2018 -0700
Committer: Bharat 
Committed: Mon Apr 9 16:37:49 2018 -0700

--
 .../hadoop-common/src/site/markdown/HttpAuthentication.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0006346a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
index 44d814c..721abea 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
@@ -28,7 +28,7 @@ Hadoop HTTP web-consoles can be configured to require 
Kerberos authentication us
 
 In addition, Hadoop HTTP web-consoles support the equivalent of Hadoop's 
Pseudo/Simple authentication. If this option is enabled, the user name must be 
specified in the first browser interaction using the user.name query string 
parameter. e.g. `http://localhost:8088/cluster?user.name=babu`.
 
-If a custom authentication mechanism is required for the HTTP web-consoles, it 
is possible to implement a plugin to support the alternate authentication 
mechanism (refer to Hadoop hadoop-auth for details on writing an 
`AuthenticatorHandler`).
+If a custom authentication mechanism is required for the HTTP web-consoles, it 
is possible to implement a plugin to support the alternate authentication 
mechanism (refer to Hadoop hadoop-auth for details on writing an 
`AuthenticationHandler`).
 
 The next section describes how to configure Hadoop HTTP web-consoles to 
require user authentication.
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: YARN-8100. Support API interface to query cluster attributes and attribute to nodes. Contributed by Bibin A Chundatt.

2018-04-09 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3409 5fc916392 -> efcca85f5


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efcca85f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
index 5ce4803..f6adb43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
@@ -39,8 +39,12 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
@@ -327,6 +331,19 @@ public class DefaultClientRequestInterceptor
 return clientRMProxy.getResourceTypeInfo(request);
   }
 
+  @Override
+  public GetAttributesToNodesResponse getAttributesToNodes(
+  GetAttributesToNodesRequest request) throws YarnException, IOException {
+return clientRMProxy.getAttributesToNodes(request);
+  }
+
+  @Override
+  public GetClusterNodeAttributesResponse getClusterNodeAttributes(
+  GetClusterNodeAttributesRequest request)
+  throws YarnException, IOException {
+return clientRMProxy.getClusterNodeAttributes(request);
+  }
+
   @VisibleForTesting
   public void setRMClient(ApplicationClientProtocol clientRM) {
 this.clientRMProxy = clientRM;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efcca85f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
index 07eaf97..781ccfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/FederationClientInterceptor.java
@@ -44,8 +44,12 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
 import 

[2/2] hadoop git commit: YARN-8100. Support API interface to query cluster attributes and attribute to nodes. Contributed by Bibin A Chundatt.

2018-04-09 Thread naganarasimha_gr
YARN-8100. Support API interface to query cluster attributes and attribute to 
nodes. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efcca85f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efcca85f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efcca85f

Branch: refs/heads/YARN-3409
Commit: efcca85f534790a5f9df09c0f2fa6a772b3d3add
Parents: 5fc9163
Author: Naganarasimha 
Authored: Tue Apr 10 07:28:53 2018 +0800
Committer: Naganarasimha 
Committed: Tue Apr 10 07:28:53 2018 +0800

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  13 ++
 .../hadoop/mapred/TestClientRedirect.java   |  17 ++
 .../yarn/api/ApplicationClientProtocol.java |  37 
 .../GetAttributesToNodesRequest.java|  70 +++
 .../GetAttributesToNodesResponse.java   |  62 +++
 .../GetClusterNodeAttributesRequest.java|  47 +
 .../GetClusterNodeAttributesResponse.java   |  72 
 .../hadoop/yarn/conf/YarnConfiguration.java |   2 +-
 .../main/proto/applicationclient_protocol.proto |   2 +
 .../src/main/proto/yarn_protos.proto|   4 +
 .../src/main/proto/yarn_service_protos.proto|  15 ++
 .../hadoop/yarn/client/api/YarnClient.java  |  36 +++-
 .../yarn/client/api/impl/YarnClientImpl.java|  21 ++-
 .../ApplicationClientProtocolPBClientImpl.java  |  37 
 .../ApplicationClientProtocolPBServiceImpl.java |  44 +
 .../pb/GetAttributesToNodesRequestPBImpl.java   | 175 ++
 .../pb/GetAttributesToNodesResponsePBImpl.java  | 184 +++
 .../GetClusterNodeAttributesRequestPBImpl.java  |  75 
 .../GetClusterNodeAttributesResponsePBImpl.java | 156 
 .../yarn/nodelabels/NodeAttributesManager.java  |   9 +-
 .../hadoop/yarn/api/TestPBImplRecords.java  |  28 +++
 .../yarn/server/MockResourceManagerFacade.java  |  17 ++
 .../server/resourcemanager/ClientRMService.java |  30 +++
 .../nodelabels/NodeAttributesManagerImpl.java   |  33 ++--
 .../resourcemanager/TestClientRMService.java| 126 +
 .../DefaultClientRequestInterceptor.java|  17 ++
 .../clientrm/FederationClientInterceptor.java   |  17 ++
 .../router/clientrm/RouterClientRMService.java  |  19 ++
 .../PassThroughClientRequestInterceptor.java|  17 ++
 29 files changed, 1361 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efcca85f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index ac4b73b..4f96a6b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerReport;
+import org.apache.hadoop.yarn.api.records.NodeAttribute;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeReport;
@@ -538,4 +539,16 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 return client.getResourceTypeInfo();
   }
+
+  @Override
+  public Set getClusterAttributes()
+  throws YarnException, IOException {
+return client.getClusterAttributes();
+  }
+
+  @Override
+  public Map getAttributesToNodes(
+  Set attributes) throws YarnException, IOException {
+return client.getAttributesToNodes(attributes);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efcca85f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 

hadoop git commit: HADOOP-15375. Branch-2 pre-commit failed to build docker image.

2018-04-09 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 3d2e327e2 -> 1ea4b2d34


HADOOP-15375. Branch-2 pre-commit failed to build docker image.

(cherry picked from commit f667ef1f65368d48d727f7a7bea00c4c06d8bbf4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea4b2d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea4b2d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea4b2d3

Branch: refs/heads/branch-2.9
Commit: 1ea4b2d34e68bad91c2ea60c629d47897bc55bad
Parents: 3d2e327
Author: Xiao Chen 
Authored: Mon Apr 9 15:34:40 2018 -0700
Committer: Xiao Chen 
Committed: Mon Apr 9 15:53:17 2018 -0700

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea4b2d3/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 352be24..b1fc420 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -140,6 +140,7 @@ ENV MAVEN_OPTS -Xms256m -Xmx1536m
 RUN apt-get -y install nodejs && \
 ln -s /usr/bin/nodejs /usr/bin/node && \
 apt-get -y install npm && \
+npm config set strict-ssl false && \
 npm install -g bower && \
 npm install -g ember-cli
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15375. Branch-2 pre-commit failed to build docker image.

2018-04-09 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 78ec00155 -> f667ef1f6


HADOOP-15375. Branch-2 pre-commit failed to build docker image.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f667ef1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f667ef1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f667ef1f

Branch: refs/heads/branch-2
Commit: f667ef1f65368d48d727f7a7bea00c4c06d8bbf4
Parents: 78ec001
Author: Xiao Chen 
Authored: Mon Apr 9 15:34:40 2018 -0700
Committer: Xiao Chen 
Committed: Mon Apr 9 15:35:10 2018 -0700

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f667ef1f/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 352be24..b1fc420 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -140,6 +140,7 @@ ENV MAVEN_OPTS -Xms256m -Xmx1536m
 RUN apt-get -y install nodejs && \
 ln -s /usr/bin/nodejs /usr/bin/node && \
 apt-get -y install npm && \
+npm config set strict-ssl false && \
 npm install -g bower && \
 npm install -g ember-cli
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7667. Docker Stop grace period should be configurable. Contributed by Eric Badger

2018-04-09 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 905937678 -> 907919d28


YARN-7667. Docker Stop grace period should be configurable. Contributed by Eric 
Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/907919d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/907919d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/907919d2

Branch: refs/heads/trunk
Commit: 907919d28c1b7e4496d189b46ecbb86a10d41339
Parents: 9059376
Author: Jason Lowe 
Authored: Mon Apr 9 17:19:21 2018 -0500
Committer: Jason Lowe 
Committed: Mon Apr 9 17:19:21 2018 -0500

--
 .../apache/hadoop/yarn/conf/YarnConfiguration.java| 14 ++
 .../src/main/resources/yarn-default.xml   |  8 
 .../linux/runtime/DockerLinuxContainerRuntime.java|  8 +++-
 .../linux/runtime/TestDockerContainerRuntime.java | 14 +++---
 4 files changed, 40 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2590b6f..d2a71bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1951,6 +1951,20 @@ public class YarnConfiguration extends Configuration {
*/
   public static final boolean DEFAULT_NM_DOCKER_ALLOW_DELAYED_REMOVAL = false;
 
+  /**
+   * A configurable value to pass to the Docker Stop command. This value
+   * defines the number of seconds between the docker stop command sending
+   * a SIGTERM and a SIGKILL.
+   */
+  public static final String NM_DOCKER_STOP_GRACE_PERIOD =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "stop.grace-period";
+
+  /**
+   * The default value for the grace period between the SIGTERM and the
+   * SIGKILL in the Docker Stop command.
+   */
+  public static final int DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD = 10;
+
   /** The mode in which the Java Container Sandbox should run detailed by
*  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 81b6658..def0816 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1787,6 +1787,14 @@
   
 
   
+A configurable value to pass to the Docker Stop command. This 
value
+  defines the number of seconds between the docker stop command sending
+  a SIGTERM and a SIGKILL.
+yarn.nodemanager.runtime.linux.docker.stop.grace-period
+10
+  
+
+  
 The mode in which the Java Container Sandbox should run 
detailed by
   the JavaSandboxLinuxContainerRuntime.
 yarn.nodemanager.runtime.linux.sandbox-mode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 0290493..132ae38 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 

[hadoop] Git Push Summary

2018-04-09 Thread shv
Repository: hadoop
Updated Tags:  refs/tags/release-2.7.6-RC0 [created] 4f23339dd

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Set the release date for 2.7.6-RC0

2018-04-09 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7.6 41ebe07a9 -> d4edbac59


Set the release date for 2.7.6-RC0


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4edbac5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4edbac5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4edbac5

Branch: refs/heads/branch-2.7.6
Commit: d4edbac59344e916305cfeaa816373c2d9f1d774
Parents: 41ebe07
Author: Konstantin V Shvachko 
Authored: Mon Apr 9 13:32:44 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Mon Apr 9 13:32:44 2018 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edbac5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bb7eeba..3b13448 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop Change Log
 
-Release 2.7.6 - UNRELEASED
+Release 2.7.6 - 2018-04-09
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edbac5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 85078ad..2cf5ca1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop HDFS Change Log
 
-Release 2.7.6 - UNRELEASED
+Release 2.7.6 - 2018-04-09
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edbac5/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index bd8971f..6fe9312 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop MapReduce Change Log
 
-Release 2.7.6 - UNRELEASED
+Release 2.7.6 - 2018-04-09
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4edbac5/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 786e3d1..85b1aa6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop YARN Change Log
 
-Release 2.7.6 - UNRELEASED
+Release 2.7.6 - 2018-04-09
 
   INCOMPATIBLE CHANGES
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-13174. Add more debug logs for delegation tokens and authentication.

2018-04-09 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2e63f67b4 -> 5f8ab3a6b


HADOOP-13174. Add more debug logs for delegation tokens and authentication.

(cherry picked from commit 4a56bde6ba1f72588a25cd96acc76089706cb786)

Conflicts:

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
(cherry picked from commit ffaf24e308506ec4c27104bf6b3769328e55c1c6)

 Conflicts:

hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f8ab3a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f8ab3a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f8ab3a6

Branch: refs/heads/branch-2.8
Commit: 5f8ab3a6b7080991c19e1e9562f28301053680f4
Parents: 2e63f67
Author: Xiao Chen 
Authored: Thu Jun 8 21:27:06 2017 -0700
Committer: Xiao Chen 
Committed: Mon Apr 9 12:06:45 2018 -0700

--
 .../authentication/client/AuthenticatedURL.java |  1 +
 .../authentication/server/AuthenticationFilter.java | 12 ++--
 .../authentication/server/TestAuthenticationFilter.java |  1 +
 .../apache/hadoop/crypto/key/kms/KMSClientProvider.java |  6 ++
 .../AbstractDelegationTokenSecretManager.java   |  1 +
 .../delegation/web/DelegationTokenAuthenticatedURL.java | 11 +++
 .../web/DelegationTokenAuthenticationHandler.java   |  8 
 .../delegation/web/DelegationTokenAuthenticator.java| 11 +++
 8 files changed, 49 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f8ab3a6/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index e772d63..d037377 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -388,6 +388,7 @@ public class AuthenticatedURL {
   // not opened via this instance.
   token.cookieHandler.put(null, conn.getHeaderFields());
 } else {
+  LOG.trace("Setting token value to null ({}), resp={}", token, respCode);
   token.set(null);
   throw new AuthenticationException("Authentication failed, status: " + 
conn.getResponseCode() +
 ", message: " + 
conn.getResponseMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f8ab3a6/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 264d991..22b5a86 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -516,6 +516,10 @@ public class AuthenticationFilter implements Filter {
   AuthenticationToken token;
   try {
 token = getToken(httpRequest);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Got token {} from httpRequest {}", token,
+  getRequestURL(httpRequest));
+}
   }
   catch (AuthenticationException ex) {
 LOG.warn("AuthenticationToken ignored: " + ex.getMessage());
@@ -526,8 +530,8 @@ public class AuthenticationFilter implements Filter {
   if (authHandler.managementOperation(token, httpRequest, httpResponse)) {
 if (token == null) {
   if (LOG.isDebugEnabled()) {
-LOG.debug("Request [{}] triggering authentication",
-getRequestURL(httpRequest));
+LOG.debug("Request [{}] triggering authentication. handler: {}",
+getRequestURL(httpRequest), authHandler.getClass());
   }
   token = 

[1/2] hadoop git commit: HADOOP-14029. Fix KMSClientProvider for non-secure proxyuser use case. Contributed by Xiaoyu Yao.

2018-04-09 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 04fbda4ba -> 2e63f67b4


HADOOP-14029. Fix KMSClientProvider for non-secure proxyuser use case. 
Contributed by Xiaoyu Yao.

(cherry picked from commit 2034315763cd7b1eb77e96c719918fc14e2dabf6)
(cherry picked from commit 6b602c6e3496d36a15a1f633c67f1a0e76e38b7d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e63f67b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e63f67b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e63f67b

Branch: refs/heads/branch-2.8
Commit: 2e63f67b45658e0974e316dbaa202dcd664df8c4
Parents: 9fd4e8d
Author: Xiaoyu Yao 
Authored: Thu Jan 26 20:34:32 2017 -0800
Committer: Xiao Chen 
Committed: Mon Apr 9 11:39:08 2018 -0700

--
 .../apache/hadoop/crypto/key/kms/KMSClientProvider.java  | 11 ++-
 .../org/apache/hadoop/crypto/key/kms/server/TestKMS.java |  6 +-
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e63f67b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 0f2a6e2..ecb272f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -1059,13 +1059,14 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   // Use real user for proxy user
   actualUgi = currentUgi.getRealUser();
 }
-
-if (!containsKmsDt(actualUgi) &&
+if (UserGroupInformation.isSecurityEnabled() &&
+!containsKmsDt(actualUgi) &&
 !actualUgi.hasKerberosCredentials()) {
-  // Use login user for user that does not have either
+  // Use login user is only necessary when Kerberos is enabled
+  // but the actual user does not have either
   // Kerberos credential or KMS delegation token for KMS operations
-  LOG.debug("using loginUser no KMS Delegation Token "
-  + "no Kerberos Credentials");
+  LOG.debug("Using loginUser when Kerberos is enabled but the actual user" 
+
+  " does not have either KMS Delegation Token or Kerberos 
Credentials");
   actualUgi = UserGroupInformation.getLoginUser();
 }
 return actualUgi;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e63f67b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index e03887f..308c974 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -2285,7 +2285,11 @@ public class TestKMS {
 
   public void doWebHDFSProxyUserTest(final boolean kerberos) throws Exception {
 Configuration conf = new Configuration();
-conf.set("hadoop.security.authentication", "kerberos");
+if (kerberos) {
+  conf.set("hadoop.security.authentication", "kerberos");
+}
+UserGroupInformation.setConfiguration(conf);
+
 final File testDir = getTestDir();
 conf = createBaseKMSConf(testDir, conf);
 if (kerberos) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: HADOOP-13988. KMSClientProvider does not work with WebHDFS and Apache Knox w/ProxyUser. Contributed by Greg Senia and Xiaoyu Yao.

2018-04-09 Thread xiao
HADOOP-13988. KMSClientProvider does not work with WebHDFS and Apache Knox 
w/ProxyUser. Contributed by Greg Senia and Xiaoyu Yao.

(cherry picked from commit a46933e8ce4c1715c11e3e3283bf0e8c2b53b837)
(cherry picked from commit 9fa98cc45e7562b0c6ca56851a60e1930a437e17)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fd4e8d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fd4e8d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fd4e8d5

Branch: refs/heads/branch-2.8
Commit: 9fd4e8d5ee0e9a617df46a7c3eaa157b88a6c63c
Parents: 04fbda4
Author: Xiaoyu Yao 
Authored: Wed Jan 25 13:26:50 2017 -0800
Committer: Xiao Chen 
Committed: Mon Apr 9 11:39:08 2018 -0700

--
 .../hadoop/crypto/key/kms/KMSClientProvider.java   | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fd4e8d5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index c02a67c..0f2a6e2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -1033,10 +1033,9 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 return tokens;
   }
 
-  private boolean currentUgiContainsKmsDt() throws IOException {
-// Add existing credentials from current UGI, since provider is cached.
-Credentials creds = UserGroupInformation.getCurrentUser().
-getCredentials();
+  private boolean containsKmsDt(UserGroupInformation ugi) throws IOException {
+// Add existing credentials from the UGI, since provider is cached.
+Credentials creds = ugi.getCredentials();
 if (!creds.getAllTokens().isEmpty()) {
   LOG.debug("Searching for token that matches service: {}", dtService);
  org.apache.hadoop.security.token.Token<? extends TokenIdentifier>
@@ -1059,11 +1058,15 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
 if (currentUgi.getRealUser() != null) {
   // Use real user for proxy user
   actualUgi = currentUgi.getRealUser();
-} else if (!currentUgiContainsKmsDt() &&
-!currentUgi.hasKerberosCredentials()) {
+}
+
+if (!containsKmsDt(actualUgi) &&
+!actualUgi.hasKerberosCredentials()) {
   // Use login user for user that does not have either
   // Kerberos credential or KMS delegation token for KMS operations
-  actualUgi = currentUgi.getLoginUser();
+  LOG.debug("using loginUser no KMS Delegation Token "
+  + "no Kerberos Credentials");
+  actualUgi = UserGroupInformation.getLoginUser();
 }
 return actualUgi;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: Updated branch-3.1 to track 3.1.1

2018-04-09 Thread wangda
Updated branch-3.1 to track 3.1.1

Change-Id: Idd55c79921b8acacc8a8a3258450e476f3abb706


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25bf1d3a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25bf1d3a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25bf1d3a

Branch: refs/heads/branch-3.1
Commit: 25bf1d3ac6fd085601768b459707c10f062943c8
Parents: 1c9038b
Author: Wangda Tan 
Authored: Mon Apr 9 10:58:50 2018 -0700
Committer: Wangda Tan 
Committed: Mon Apr 9 10:58:50 2018 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-uploader/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-fs2img/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml   

[1/2] hadoop git commit: Updated branch-3.1 to track 3.1.1

2018-04-09 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 1c9038bae -> 25bf1d3ac


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25bf1d3a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
index e01d406..4974148 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-1/pom.xml
@@ -22,13 +22,13 @@
   
 hadoop-yarn-server-timelineservice-hbase-server
 org.apache.hadoop
-3.1.0-SNAPSHOT
+3.1.1-SNAPSHOT
   
 
   4.0.0
   hadoop-yarn-server-timelineservice-hbase-server-1
   Apache Hadoop YARN TimelineService HBase Server 1.2
-  3.1.0-SNAPSHOT
+  3.1.1-SNAPSHOT
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25bf1d3a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
index 80a8222..21610e8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
@@ -22,13 +22,13 @@
   
 hadoop-yarn-server-timelineservice-hbase-server
 org.apache.hadoop
-3.1.0-SNAPSHOT
+3.1.1-SNAPSHOT
   
   4.0.0
 
   hadoop-yarn-server-timelineservice-hbase-server-2
   Apache Hadoop YARN TimelineService HBase Server 2.0
-  3.1.0-SNAPSHOT
+  3.1.1-SNAPSHOT
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25bf1d3a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
index ab48ff1..9d045ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
@@ -22,12 +22,12 @@
   
 hadoop-yarn-server-timelineservice-hbase
 org.apache.hadoop
-3.1.0-SNAPSHOT
+3.1.1-SNAPSHOT
   
   4.0.0
 
   hadoop-yarn-server-timelineservice-hbase-server
-  3.1.0-SNAPSHOT
+  3.1.1-SNAPSHOT
   Apache Hadoop YARN TimelineService HBase Servers
   pom
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25bf1d3a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
index e0110ba..24a3f34 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml

hadoop git commit: HDFS-13376. Specify minimum GCC version to avoid TLS support error in Build of hadoop-hdfs-native-client. Contributed by LiXin Ge.

2018-04-09 Thread jhc
Repository: hadoop
Updated Branches:
  refs/heads/trunk e9b9f48da -> 905937678


HDFS-13376. Specify minimum GCC version to avoid TLS support error in Build of 
hadoop-hdfs-native-client.  Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90593767
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90593767
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90593767

Branch: refs/heads/trunk
Commit: 905937678577fc0deb57489590863464562088ad
Parents: e9b9f48
Author: James Clampffer 
Authored: Mon Apr 9 13:48:42 2018 -0400
Committer: James Clampffer 
Committed: Mon Apr 9 13:48:42 2018 -0400

--
 BUILDING.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90593767/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 3b9a2ef..9727004 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -10,6 +10,8 @@ Requirements:
 * CMake 3.1 or newer (if compiling native code)
 * Zlib devel (if compiling native code)
 * Cyrus SASL devel (if compiling native code)
+* One of the compilers that support thread_local storage: GCC 4.8.1 or later, 
Visual Studio,
+  Clang (community version), Clang (version for iOS 9 and later) (if compiling 
native code)
 * openssl devel (if compiling native hadoop-pipes and to get the best HDFS 
encryption performance)
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling 
fuse_dfs)
 * Jansson C XML parsing library ( if compiling libwebhdfs )


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. Contributed by Yiqun Lin.

2018-04-09 Thread inigoiri
HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. 
Contributed by Yiqun Lin.

(cherry picked from commit e9b9f48dad5ebb58ee529f918723089e8356c480)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78ec0015
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78ec0015
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78ec0015

Branch: refs/heads/branch-2
Commit: 78ec00155ecf05b213864081df12d0ed659e40ef
Parents: 7e69242
Author: Inigo Goiri 
Authored: Mon Apr 9 10:09:25 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 10:11:38 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 30 
 .../federation/router/TestRouterQuota.java  |  4 +++
 2 files changed, 28 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78ec0015/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index bbae3ba..d7328fe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -882,7 +882,8 @@ public class RouterRpcServer extends AbstractService
   throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -903,7 +904,8 @@ public class RouterRpcServer extends AbstractService
   final Options.Rename... options) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -980,7 +982,8 @@ public class RouterRpcServer extends AbstractService
   public boolean delete(String src, boolean recursive) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> locations = getLocationsForPath(src, true);
+final List<RemoteLocation> locations =
+getLocationsForPath(src, true, false);
 RemoteMethod method = new RemoteMethod("delete",
 new Class[] {String.class, boolean.class}, new RemoteParam(),
 recursive);
@@ -2081,14 +2084,29 @@ public class RouterRpcServer extends AbstractService
 
   /**
* Get the possible locations of a path in the federated cluster.
+   * During the get operation, it will do the quota verification.
+   *
+   * @param path Path to check.
+   * @param failIfLocked Fail the request if locked (top mount point).
+   * @return Prioritized list of locations in the federated cluster.
+   * @throws IOException If the location for this path cannot be determined.
+   */
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked) throws IOException {
+return getLocationsForPath(path, failIfLocked, true);
+  }
+
+  /**
+   * Get the possible locations of a path in the federated cluster.
*
* @param path Path to check.
* @param failIfLocked Fail the request if locked (top mount point).
+   * @param needQuotaVerify If need to do the quota verification.
* @return Prioritized list of locations in the federated cluster.
* @throws IOException If the location for this path cannot be determined.
*/
-  protected List<RemoteLocation> getLocationsForPath(
-  String path, boolean failIfLocked) throws IOException {
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked, boolean needQuotaVerify) throws IOException {
 try {
   // Check the location for this path
   final PathLocation location =
@@ -2109,7 +2127,7 @@ public class RouterRpcServer extends AbstractService
 }
 
 // Check quota
-if (this.router.isQuotaEnabled()) {
+if (this.router.isQuotaEnabled() && needQuotaVerify) {
   RouterQuotaUsage quotaUsage = this.router.getQuotaManager()
   .getQuotaUsage(path);
   if 

[1/3] hadoop git commit: HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. Contributed by Yiqun Lin.

2018-04-09 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7e692425d -> 78ec00155
  refs/heads/branch-3.1 8c9dab978 -> 1c9038bae
  refs/heads/trunk ac32b3576 -> e9b9f48da


HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9b9f48d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9b9f48d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9b9f48d

Branch: refs/heads/trunk
Commit: e9b9f48dad5ebb58ee529f918723089e8356c480
Parents: ac32b35
Author: Inigo Goiri 
Authored: Mon Apr 9 10:09:25 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 10:09:25 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 30 
 .../federation/router/TestRouterQuota.java  |  4 +++
 2 files changed, 28 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9b9f48d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 1159289..e6d2f5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -900,7 +900,8 @@ public class RouterRpcServer extends AbstractService
   throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -921,7 +922,8 @@ public class RouterRpcServer extends AbstractService
   final Options.Rename... options) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -998,7 +1000,8 @@ public class RouterRpcServer extends AbstractService
   public boolean delete(String src, boolean recursive) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> locations = getLocationsForPath(src, true);
+final List<RemoteLocation> locations =
+getLocationsForPath(src, true, false);
 RemoteMethod method = new RemoteMethod("delete",
 new Class[] {String.class, boolean.class}, new RemoteParam(),
 recursive);
@@ -2213,14 +2216,29 @@ public class RouterRpcServer extends AbstractService
 
   /**
* Get the possible locations of a path in the federated cluster.
+   * During the get operation, it will do the quota verification.
+   *
+   * @param path Path to check.
+   * @param failIfLocked Fail the request if locked (top mount point).
+   * @return Prioritized list of locations in the federated cluster.
+   * @throws IOException If the location for this path cannot be determined.
+   */
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked) throws IOException {
+return getLocationsForPath(path, failIfLocked, true);
+  }
+
+  /**
+   * Get the possible locations of a path in the federated cluster.
*
* @param path Path to check.
* @param failIfLocked Fail the request if locked (top mount point).
+   * @param needQuotaVerify If need to do the quota verification.
* @return Prioritized list of locations in the federated cluster.
* @throws IOException If the location for this path cannot be determined.
*/
-  protected List<RemoteLocation> getLocationsForPath(
-  String path, boolean failIfLocked) throws IOException {
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked, boolean needQuotaVerify) throws IOException {
 try {
   // Check the location for this path
   final PathLocation location =
@@ -2241,7 +2259,7 @@ public class RouterRpcServer extends AbstractService
 }
 
 // Check quota
-if (this.router.isQuotaEnabled()) {
+if (this.router.isQuotaEnabled() && needQuotaVerify) {
   

[2/3] hadoop git commit: HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. Contributed by Yiqun Lin.

2018-04-09 Thread inigoiri
HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. 
Contributed by Yiqun Lin.

(cherry picked from commit e9b9f48dad5ebb58ee529f918723089e8356c480)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c9038ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c9038ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c9038ba

Branch: refs/heads/branch-3.1
Commit: 1c9038bae51c9cbceb5894795caf1d2634b35744
Parents: 8c9dab9
Author: Inigo Goiri 
Authored: Mon Apr 9 10:09:25 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 10:09:55 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 30 
 .../federation/router/TestRouterQuota.java  |  4 +++
 2 files changed, 28 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c9038ba/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 1159289..e6d2f5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -900,7 +900,8 @@ public class RouterRpcServer extends AbstractService
   throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -921,7 +922,8 @@ public class RouterRpcServer extends AbstractService
   final Options.Rename... options) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -998,7 +1000,8 @@ public class RouterRpcServer extends AbstractService
   public boolean delete(String src, boolean recursive) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> locations = getLocationsForPath(src, true);
+final List<RemoteLocation> locations =
+getLocationsForPath(src, true, false);
 RemoteMethod method = new RemoteMethod("delete",
 new Class[] {String.class, boolean.class}, new RemoteParam(),
 recursive);
@@ -2213,14 +2216,29 @@ public class RouterRpcServer extends AbstractService
 
   /**
* Get the possible locations of a path in the federated cluster.
+   * During the get operation, it will do the quota verification.
+   *
+   * @param path Path to check.
+   * @param failIfLocked Fail the request if locked (top mount point).
+   * @return Prioritized list of locations in the federated cluster.
+   * @throws IOException If the location for this path cannot be determined.
+   */
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked) throws IOException {
+return getLocationsForPath(path, failIfLocked, true);
+  }
+
+  /**
+   * Get the possible locations of a path in the federated cluster.
*
* @param path Path to check.
* @param failIfLocked Fail the request if locked (top mount point).
+   * @param needQuotaVerify If need to do the quota verification.
* @return Prioritized list of locations in the federated cluster.
* @throws IOException If the location for this path cannot be determined.
*/
-  protected List<RemoteLocation> getLocationsForPath(
-  String path, boolean failIfLocked) throws IOException {
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked, boolean needQuotaVerify) throws IOException {
 try {
   // Check the location for this path
   final PathLocation location =
@@ -2241,7 +2259,7 @@ public class RouterRpcServer extends AbstractService
 }
 
 // Check quota
-if (this.router.isQuotaEnabled()) {
+if (this.router.isQuotaEnabled() && needQuotaVerify) {
   RouterQuotaUsage quotaUsage = this.router.getQuotaManager()
   .getQuotaUsage(path);
   if 

[1/4] hadoop git commit: HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun.

2018-04-09 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 14827da74 -> 7e692425d
  refs/heads/branch-2.9 b46e9bb4b -> 3d2e327e2
  refs/heads/branch-3.0 952bb0f37 -> 877f963a0
  refs/heads/branch-3.1 8311fcc75 -> 8c9dab978


HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the 
time. Contributed by Jinglun.

(cherry picked from commit ac32b3576da4cc463dff85118163ccfff02215fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c9dab97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c9dab97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c9dab97

Branch: refs/heads/branch-3.1
Commit: 8c9dab978e2bdcbd6558c9e12b177c5bdca04393
Parents: 8311fcc
Author: Inigo Goiri 
Authored: Mon Apr 9 09:16:48 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 09:17:35 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 ++
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c9dab97/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 7b9cd64..1c38791 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -79,6 +79,9 @@ public class RequestHedgingProxyProvider extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
+  if (currentUsedProxy != null) {
+return method.invoke(currentUsedProxy.proxy, args);
+  }
   Map<String, ProxyInfo<T>> proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c9dab97/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 8d6b02d..4b3fdf9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -43,10 +43,13 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -100,6 +103,37 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
+  public void testRequestNNAfterOneSuccess() throws Exception {
+final AtomicInteger count = new AtomicInteger(0);
+final ClientProtocol goodMock = mock(ClientProtocol.class);
+when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+Thread.sleep(1000);
+return new long[]{1};
+  }
+});
+final ClientProtocol badMock = mock(ClientProtocol.class);
+when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+throw new IOException("Bad mock !!");
+  }
+});
+
+RequestHedgingProxyProvider<ClientProtocol> provider =
+new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
+createFactory(badMock, goodMock, goodMock, badMock));
+ClientProtocol proxy = provider.getProxy().proxy;
+proxy.getStats();
+assertEquals(2, count.get());
+proxy.getStats();
+assertEquals(3, count.get());
+  }
+
+  @Test
   

[3/4] hadoop git commit: HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun.

2018-04-09 Thread inigoiri
HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the 
time. Contributed by Jinglun.

(cherry picked from commit ac32b3576da4cc463dff85118163ccfff02215fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e692425
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e692425
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e692425

Branch: refs/heads/branch-2
Commit: 7e692425d538454abf69b07f6e8fd686a1171ac8
Parents: 14827da
Author: Inigo Goiri 
Authored: Mon Apr 9 09:16:48 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 09:18:42 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 ++
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e692425/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 010e9e5..f34adce 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -78,6 +78,9 @@ public class RequestHedgingProxyProvider extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
+  if (currentUsedProxy != null) {
+return method.invoke(currentUsedProxy.proxy, args);
+  }
   Map proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e692425/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 65fbbf8..a8a5c6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -42,10 +42,13 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -99,6 +102,37 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
+  public void testRequestNNAfterOneSuccess() throws Exception {
+final AtomicInteger count = new AtomicInteger(0);
+final ClientProtocol goodMock = mock(ClientProtocol.class);
+when(goodMock.getStats()).thenAnswer(new Answer() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+Thread.sleep(1000);
+return new long[]{1};
+  }
+});
+final ClientProtocol badMock = mock(ClientProtocol.class);
+when(badMock.getStats()).thenAnswer(new Answer() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+throw new IOException("Bad mock !!");
+  }
+});
+
+RequestHedgingProxyProvider provider =
+new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
+createFactory(badMock, goodMock, goodMock, badMock));
+ClientProtocol proxy = provider.getProxy().proxy;
+proxy.getStats();
+assertEquals(2, count.get());
+proxy.getStats();
+assertEquals(3, count.get());
+  }
+
+  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 Mockito.when(goodMock.getStats()).thenAnswer(new Answer() {



[2/4] hadoop git commit: HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun.

2018-04-09 Thread inigoiri
HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the 
time. Contributed by Jinglun.

(cherry picked from commit ac32b3576da4cc463dff85118163ccfff02215fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/877f963a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/877f963a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/877f963a

Branch: refs/heads/branch-3.0
Commit: 877f963a059d21e942b0b5cbff5f60ce23c5b5fe
Parents: 952bb0f
Author: Inigo Goiri 
Authored: Mon Apr 9 09:16:48 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 09:18:03 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 ++
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/877f963a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index b94e94d..814a091 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -76,6 +76,9 @@ public class RequestHedgingProxyProvider extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
+  if (currentUsedProxy != null) {
+return method.invoke(currentUsedProxy.proxy, args);
+  }
   Map proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/877f963a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 04e77ad..3c46f52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -42,10 +42,13 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -99,6 +102,37 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
+  public void testRequestNNAfterOneSuccess() throws Exception {
+final AtomicInteger count = new AtomicInteger(0);
+final ClientProtocol goodMock = mock(ClientProtocol.class);
+when(goodMock.getStats()).thenAnswer(new Answer() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+Thread.sleep(1000);
+return new long[]{1};
+  }
+});
+final ClientProtocol badMock = mock(ClientProtocol.class);
+when(badMock.getStats()).thenAnswer(new Answer() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+throw new IOException("Bad mock !!");
+  }
+});
+
+RequestHedgingProxyProvider provider =
+new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
+createFactory(badMock, goodMock, goodMock, badMock));
+ClientProtocol proxy = provider.getProxy().proxy;
+proxy.getStats();
+assertEquals(2, count.get());
+proxy.getStats();
+assertEquals(3, count.get());
+  }
+
+  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 Mockito.when(goodMock.getStats()).thenAnswer(new Answer() {



[4/4] hadoop git commit: HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun.

2018-04-09 Thread inigoiri
HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the 
time. Contributed by Jinglun.

(cherry picked from commit ac32b3576da4cc463dff85118163ccfff02215fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d2e327e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d2e327e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d2e327e

Branch: refs/heads/branch-2.9
Commit: 3d2e327e2f99bac161fa8f00e93b5b6edece2a65
Parents: b46e9bb
Author: Inigo Goiri 
Authored: Mon Apr 9 09:16:48 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 09:19:04 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 ++
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2e327e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 49fe4be..b9f213e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -76,6 +76,9 @@ public class RequestHedgingProxyProvider extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
+  if (currentUsedProxy != null) {
+return method.invoke(currentUsedProxy.proxy, args);
+  }
   Map proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d2e327e/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 04e77ad..3c46f52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -42,10 +42,13 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -99,6 +102,37 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
+  public void testRequestNNAfterOneSuccess() throws Exception {
+final AtomicInteger count = new AtomicInteger(0);
+final ClientProtocol goodMock = mock(ClientProtocol.class);
+when(goodMock.getStats()).thenAnswer(new Answer() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+Thread.sleep(1000);
+return new long[]{1};
+  }
+});
+final ClientProtocol badMock = mock(ClientProtocol.class);
+when(badMock.getStats()).thenAnswer(new Answer() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+throw new IOException("Bad mock !!");
+  }
+});
+
+RequestHedgingProxyProvider provider =
+new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
+createFactory(badMock, goodMock, goodMock, badMock));
+ClientProtocol proxy = provider.getProxy().proxy;
+proxy.getStats();
+assertEquals(2, count.get());
+proxy.getStats();
+assertEquals(3, count.get());
+  }
+
+  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 Mockito.when(goodMock.getStats()).thenAnswer(new Answer() {



hadoop git commit: HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun.

2018-04-09 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 821b0de4c -> ac32b3576


HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the 
time. Contributed by Jinglun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac32b357
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac32b357
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac32b357

Branch: refs/heads/trunk
Commit: ac32b3576da4cc463dff85118163ccfff02215fc
Parents: 821b0de
Author: Inigo Goiri 
Authored: Mon Apr 9 09:16:48 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 09:16:48 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 ++
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac32b357/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 7b9cd64..1c38791 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -79,6 +79,9 @@ public class RequestHedgingProxyProvider extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
+  if (currentUsedProxy != null) {
+return method.invoke(currentUsedProxy.proxy, args);
+  }
   Map proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac32b357/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 8d6b02d..4b3fdf9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -43,10 +43,13 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -100,6 +103,37 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
+  public void testRequestNNAfterOneSuccess() throws Exception {
+final AtomicInteger count = new AtomicInteger(0);
+final ClientProtocol goodMock = mock(ClientProtocol.class);
+when(goodMock.getStats()).thenAnswer(new Answer() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+Thread.sleep(1000);
+return new long[]{1};
+  }
+});
+final ClientProtocol badMock = mock(ClientProtocol.class);
+when(badMock.getStats()).thenAnswer(new Answer() {
+  @Override
+  public long[] answer(InvocationOnMock invocation) throws Throwable {
+count.incrementAndGet();
+throw new IOException("Bad mock !!");
+  }
+});
+
+RequestHedgingProxyProvider provider =
+new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
+createFactory(badMock, goodMock, goodMock, badMock));
+ClientProtocol proxy = provider.getProxy().proxy;
+proxy.getStats();
+assertEquals(2, count.get());
+proxy.getStats();
+assertEquals(3, count.get());
+  }
+
+  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 Mockito.when(goodMock.getStats()).thenAnswer(new Answer() {



[2/2] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-09 Thread sunilg
YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. 
Contributed by Suma Shivaprasad.

(cherry picked from commit 821b0de4c59156d4a65112de03ba3e7e1c88e309)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8311fcc7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8311fcc7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8311fcc7

Branch: refs/heads/branch-3.1
Commit: 8311fcc75b7432d316ed61285c4b75c3f512c727
Parents: 091db4d
Author: Sunil G 
Authored: Mon Apr 9 21:17:22 2018 +0530
Committer: Sunil G 
Committed: Mon Apr 9 21:17:56 2018 +0530

--
 .../server/resourcemanager/RMServerUtils.java   |   5 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  47 ++
 .../resourcemanager/scheduler/Allocation.java   |  12 +
 .../scheduler/SchedulerUtils.java   |  33 +-
 .../capacity/AutoCreatedLeafQueue.java  |   3 +-
 .../AutoCreatedQueueManagementPolicy.java   |  12 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../CapacitySchedulerConfiguration.java |  28 +
 .../scheduler/capacity/LeafQueue.java   |  11 +
 .../scheduler/capacity/ManagedParentQueue.java  |   5 +-
 .../GuaranteedOrZeroCapacityOverTimePolicy.java | 573 +++
 .../placement/PendingAskUpdateResult.java   |   8 +
 .../yarn/server/resourcemanager/MockNM.java |  15 +
 .../server/resourcemanager/TestAppManager.java  |  20 +-
 ...stCapacitySchedulerAutoCreatedQueueBase.java | 241 +---
 .../TestCapacitySchedulerAutoQueueCreation.java | 233 +---
 .../TestQueueManagementDynamicEditPolicy.java   |  30 +-
 17 files changed, 834 insertions(+), 444 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8311fcc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 33451295..ab6bbcf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -236,13 +236,14 @@ public class RMServerUtils {
*/
   public static void normalizeAndValidateRequests(List ask,
   Resource maximumResource, String queueName, YarnScheduler scheduler,
-  RMContext rmContext)
-  throws InvalidResourceRequestException {
+  RMContext rmContext) throws InvalidResourceRequestException {
 // Get queue from scheduler
 QueueInfo queueInfo = null;
 try {
   queueInfo = scheduler.getQueueInfo(queueName, false, false);
 } catch (IOException e) {
+  //Queue may not exist since it could be auto-created in case of
+  // dynamic queues
 }
 
 for (ResourceRequest resReq : ask) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8311fcc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index c23b135..1b1e2c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
 

[1/2] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-09 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 091db4d0e -> 8311fcc75


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8311fcc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
index 6c6ac20..addec66 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.security.TestGroupsCaching;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -65,6 +66,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
 .SimpleGroupsMapping;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.After;
 import org.junit.Assert;
@@ -89,6 +92,8 @@ import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.DOT;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.FAIR_APP_ORDERING_POLICY;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.capacity.CapacitySchedulerConfiguration.ROOT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -99,7 +104,7 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   private static final Log LOG = LogFactory.getLog(
   TestCapacitySchedulerAutoCreatedQueueBase.class);
   public static final int GB = 1024;
-  public final static ContainerUpdates NULL_UPDATE_REQUESTS =
+  public static final ContainerUpdates NULL_UPDATE_REQUESTS =
   new ContainerUpdates();
 
   public static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
@@ -112,9 +117,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String B1 = B + ".b1";
   public static final String B2 = B + ".b2";
   public static final String B3 = B + ".b3";
-  public static final String C1 = C + ".c1";
-  public static final String C2 = C + ".c2";
-  public static final String C3 = C + ".c3";
   public static final float A_CAPACITY = 20f;
   public static final float B_CAPACITY = 40f;
   public static final float C_CAPACITY = 20f;
@@ -124,8 +126,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final float B1_CAPACITY = 60f;
   public static final float B2_CAPACITY = 20f;
   public static final float B3_CAPACITY = 20f;
-  public static final float C1_CAPACITY = 20f;
-  public static final float C2_CAPACITY = 20f;
 
   public static final int NODE_MEMORY = 16;
 
@@ -147,12 +147,14 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String NODEL_LABEL_GPU = "GPU";
   public static final String NODEL_LABEL_SSD = "SSD";
 
+  public static final float NODE_LABEL_GPU_TEMPLATE_CAPACITY = 30.0f;
+  public static final float NODEL_LABEL_SSD_TEMPLATE_CAPACITY = 40.0f;
+
   protected MockRM mockRM = null;
   protected MockNM nm1 = null;
   protected MockNM nm2 = null;
   protected MockNM nm3 = null;
   protected CapacityScheduler cs;
-  private final TestCapacityScheduler tcs = new TestCapacityScheduler();
   protected SpyDispatcher dispatcher;
   private static EventHandler rmAppEventEventHandler;
 
@@ -215,15 +217,29 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   }
 
   protected void setupNodes(MockRM newMockRM) throws Exception {
+NodeLabel ssdLabel = Records.newRecord(NodeLabel.class);
+ 

[2/2] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-09 Thread sunilg
YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. 
Contributed by Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/821b0de4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/821b0de4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/821b0de4

Branch: refs/heads/trunk
Commit: 821b0de4c59156d4a65112de03ba3e7e1c88e309
Parents: 5700556
Author: Sunil G 
Authored: Mon Apr 9 21:17:22 2018 +0530
Committer: Sunil G 
Committed: Mon Apr 9 21:17:22 2018 +0530

--
 .../server/resourcemanager/RMServerUtils.java   |   5 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  47 ++
 .../resourcemanager/scheduler/Allocation.java   |  12 +
 .../scheduler/SchedulerUtils.java   |  33 +-
 .../capacity/AutoCreatedLeafQueue.java  |   3 +-
 .../AutoCreatedQueueManagementPolicy.java   |  12 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../CapacitySchedulerConfiguration.java |  28 +
 .../scheduler/capacity/LeafQueue.java   |  11 +
 .../scheduler/capacity/ManagedParentQueue.java  |   5 +-
 .../GuaranteedOrZeroCapacityOverTimePolicy.java | 573 +++
 .../placement/PendingAskUpdateResult.java   |   8 +
 .../yarn/server/resourcemanager/MockNM.java |  15 +
 .../server/resourcemanager/TestAppManager.java  |  20 +-
 ...stCapacitySchedulerAutoCreatedQueueBase.java | 241 +---
 .../TestCapacitySchedulerAutoQueueCreation.java | 233 +---
 .../TestQueueManagementDynamicEditPolicy.java   |  30 +-
 17 files changed, 834 insertions(+), 444 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 33451295..ab6bbcf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -236,13 +236,14 @@ public class RMServerUtils {
*/
   public static void normalizeAndValidateRequests(List ask,
   Resource maximumResource, String queueName, YarnScheduler scheduler,
-  RMContext rmContext)
-  throws InvalidResourceRequestException {
+  RMContext rmContext) throws InvalidResourceRequestException {
 // Get queue from scheduler
 QueueInfo queueInfo = null;
 try {
   queueInfo = scheduler.getQueueInfo(queueName, false, false);
 } catch (IOException e) {
+  //Queue may not exist since it could be auto-created in case of
+  // dynamic queues
 }
 
 for (ResourceRequest resReq : ask) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index c23b135..1b1e2c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import 

[1/2] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-09 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5700556cd -> 821b0de4c


http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
index 6c6ac20..addec66 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.security.TestGroupsCaching;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -65,6 +66,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
 .SimpleGroupsMapping;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.After;
 import org.junit.Assert;
@@ -89,6 +92,8 @@ import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.DOT;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.FAIR_APP_ORDERING_POLICY;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.capacity.CapacitySchedulerConfiguration.ROOT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -99,7 +104,7 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   private static final Log LOG = LogFactory.getLog(
   TestCapacitySchedulerAutoCreatedQueueBase.class);
   public static final int GB = 1024;
-  public final static ContainerUpdates NULL_UPDATE_REQUESTS =
+  public static final ContainerUpdates NULL_UPDATE_REQUESTS =
   new ContainerUpdates();
 
   public static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
@@ -112,9 +117,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String B1 = B + ".b1";
   public static final String B2 = B + ".b2";
   public static final String B3 = B + ".b3";
-  public static final String C1 = C + ".c1";
-  public static final String C2 = C + ".c2";
-  public static final String C3 = C + ".c3";
   public static final float A_CAPACITY = 20f;
   public static final float B_CAPACITY = 40f;
   public static final float C_CAPACITY = 20f;
@@ -124,8 +126,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final float B1_CAPACITY = 60f;
   public static final float B2_CAPACITY = 20f;
   public static final float B3_CAPACITY = 20f;
-  public static final float C1_CAPACITY = 20f;
-  public static final float C2_CAPACITY = 20f;
 
   public static final int NODE_MEMORY = 16;
 
@@ -147,12 +147,14 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String NODEL_LABEL_GPU = "GPU";
   public static final String NODEL_LABEL_SSD = "SSD";
 
+  public static final float NODE_LABEL_GPU_TEMPLATE_CAPACITY = 30.0f;
+  public static final float NODEL_LABEL_SSD_TEMPLATE_CAPACITY = 40.0f;
+
   protected MockRM mockRM = null;
   protected MockNM nm1 = null;
   protected MockNM nm2 = null;
   protected MockNM nm3 = null;
   protected CapacityScheduler cs;
-  private final TestCapacityScheduler tcs = new TestCapacityScheduler();
   protected SpyDispatcher dispatcher;
   private static EventHandler rmAppEventEventHandler;
 
@@ -215,15 +217,29 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   }
 
   protected void setupNodes(MockRM newMockRM) throws Exception {
+NodeLabel ssdLabel = Records.newRecord(NodeLabel.class);
+

[Hadoop Wiki] Update of "Books" by Packt Publishing

2018-04-09 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "Books" page has been changed by Packt Publishing:
https://wiki.apache.org/hadoop/Books?action=diff&rev1=48&rev2=49

  
  === Modern Big Data Processing with Hadoop ===
  
- '''Name:'''  
[[https://www.packtpub.com/big-data-and-business-intelligence/modern-big-data-processing-hadoop|Modern
 Big Data Processing with Hadoop]]
+ '''Name:'''  [[https://www.amazon.com/dp/B0787KY8RH/|Modern Big Data 
Processing with Hadoop]]
  
  '''Author:''' V. Naresh Kumar, Prashant Shindgikar
  

-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org