hadoop git commit: HDFS-11238. Fix checkstyle warnings in NameNode#createNameNode. Contributed by Ethan Li.

2017-02-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1e11080b7 -> 8acb376c9


HDFS-11238. Fix checkstyle warnings in NameNode#createNameNode. Contributed by 
Ethan Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8acb376c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8acb376c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8acb376c

Branch: refs/heads/trunk
Commit: 8acb376c9c5f7f52a097be221ed18877a403bece
Parents: 1e11080
Author: Akira Ajisaka 
Authored: Wed Feb 15 16:53:50 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 16:53:50 2017 +0900

--
 .../hadoop/hdfs/server/namenode/NameNode.java   | 101 +--
 1 file changed, 46 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8acb376c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index df5ee0f..1752cf7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -1579,62 +1579,53 @@ public class NameNode extends ReconfigurableBase 
implements
 }
 setStartupOption(conf, startOpt);
 
+boolean aborted = false;
 switch (startOpt) {
-  case FORMAT: {
-boolean aborted = format(conf, startOpt.getForceFormat(),
-startOpt.getInteractiveFormat());
-terminate(aborted ? 1 : 0);
-return null; // avoid javac warning
-  }
-  case GENCLUSTERID: {
-System.err.println("Generating new cluster id:");
-System.out.println(NNStorage.newClusterID());
-terminate(0);
-return null;
-  }
-  case ROLLBACK: {
-boolean aborted = doRollback(conf, true);
-terminate(aborted ? 1 : 0);
-return null; // avoid warning
-  }
-  case BOOTSTRAPSTANDBY: {
-String toolArgs[] = Arrays.copyOfRange(argv, 1, argv.length);
-int rc = BootstrapStandby.run(toolArgs, conf);
-terminate(rc);
-return null; // avoid warning
-  }
-  case INITIALIZESHAREDEDITS: {
-boolean aborted = initializeSharedEdits(conf,
-startOpt.getForceFormat(),
-startOpt.getInteractiveFormat());
-terminate(aborted ? 1 : 0);
-return null; // avoid warning
-  }
-  case BACKUP:
-  case CHECKPOINT: {
-NamenodeRole role = startOpt.toNodeRole();
-DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
-return new BackupNode(conf, role);
-  }
-  case RECOVER: {
-NameNode.doRecovery(startOpt, conf);
-return null;
-  }
-  case METADATAVERSION: {
-printMetadataVersion(conf);
-terminate(0);
-return null; // avoid javac warning
-  }
-  case UPGRADEONLY: {
-DefaultMetricsSystem.initialize("NameNode");
-new NameNode(conf);
-terminate(0);
-return null;
-  }
-  default: {
-DefaultMetricsSystem.initialize("NameNode");
-return new NameNode(conf);
-  }
+case FORMAT:
+  aborted = format(conf, startOpt.getForceFormat(),
+  startOpt.getInteractiveFormat());
+  terminate(aborted ? 1 : 0);
+  return null; // avoid javac warning
+case GENCLUSTERID:
+  System.err.println("Generating new cluster id:");
+  System.out.println(NNStorage.newClusterID());
+  terminate(0);
+  return null;
+case ROLLBACK:
+  aborted = doRollback(conf, true);
+  terminate(aborted ? 1 : 0);
+  return null; // avoid warning
+case BOOTSTRAPSTANDBY:
+  String[] toolArgs = Arrays.copyOfRange(argv, 1, argv.length);
+  int rc = BootstrapStandby.run(toolArgs, conf);
+  terminate(rc);
+  return null; // avoid warning
+case INITIALIZESHAREDEDITS:
+  aborted = initializeSharedEdits(conf,
+  startOpt.getForceFormat(),
+  startOpt.getInteractiveFormat());
+  terminate(aborted ? 1 : 0);
+  return null; // avoid warning
+case BACKUP:
+case CHECKPOINT:
+  NamenodeRole role = startOpt.toNodeRole();
+  DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
+  return new BackupNode(conf, role);
+case RECOVER:
+  NameNode.doRecovery(startOpt, conf);
+  return null;
+case METADATAVERSION:
+  printMetadataVersion(conf);
+  

hadoop git commit: HADOOP-13942. Update checkstyle and checkstyle plugin version to handle indentation of JDK8 Lambdas.

2017-02-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk fbc0c2bd7 -> 1e11080b7


HADOOP-13942. Update checkstyle and checkstyle plugin version to handle 
indentation of JDK8 Lambdas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e11080b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e11080b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e11080b

Branch: refs/heads/trunk
Commit: 1e11080b7825a2d0bafce91432009f585b7b5d21
Parents: fbc0c2b
Author: Akira Ajisaka 
Authored: Wed Feb 15 16:33:30 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 16:35:08 2017 +0900

--
 hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml | 4 +---
 pom.xml | 4 ++--
 2 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e11080b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
--
diff --git a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml 
b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
index 851b57d..1b968ae 100644
--- a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
+++ b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
@@ -123,9 +123,7 @@
 
 
 
-
-  
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e11080b/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 2ca27c1..3eeba1e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -107,8 +107,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd
 3.3.0
 2.5.0
 1.0.0
-2.15
-6.6
+2.17
+7.5.1
 1.4.3
 
 bash


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-4753. Use doxia macro to generate in-page TOC of YARN site documentation. (iwasakims)

2017-02-14 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 58acb676a -> cba7e2c34


YARN-4753. Use doxia macro to generate in-page TOC of YARN site documentation. 
(iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cba7e2c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cba7e2c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cba7e2c3

Branch: refs/heads/branch-2
Commit: cba7e2c3463e065b11064f920e6ffe383e2f68a5
Parents: 58acb67
Author: Masatake Iwasaki 
Authored: Wed Feb 15 13:37:25 2017 +0900
Committer: Masatake Iwasaki 
Committed: Wed Feb 15 13:37:25 2017 +0900

--
 .../src/site/markdown/CapacityScheduler.md  | 14 +-
 .../site/markdown/DockerContainerExecutor.md.vm |  7 +
 .../src/site/markdown/DockerContainers.md   |  8 +-
 .../src/site/markdown/FairScheduler.md  | 14 +-
 .../src/site/markdown/NodeLabel.md  | 13 +
 .../src/site/markdown/NodeManager.md|  8 +-
 .../src/site/markdown/NodeManagerCgroups.md |  3 +-
 .../src/site/markdown/NodeManagerRest.md|  8 +-
 .../site/markdown/OpportunisticContainers.md| 17 +---
 .../src/site/markdown/ReservationSystem.md  |  6 +---
 .../src/site/markdown/ResourceManagerHA.md  | 10 +--
 .../src/site/markdown/ResourceManagerRest.md| 29 ++--
 .../src/site/markdown/ResourceManagerRestart.md | 13 +
 .../src/site/markdown/SecureContainer.md|  2 +-
 .../src/site/markdown/TimelineServer.md | 16 +--
 .../src/site/markdown/WebApplicationProxy.md|  7 +
 .../src/site/markdown/WebServicesIntro.md   | 13 +
 .../site/markdown/WritingYarnApplications.md| 14 +-
 .../site/markdown/YarnApplicationSecurity.md|  2 ++
 .../src/site/markdown/YarnCommands.md   | 21 +-
 20 files changed, 22 insertions(+), 203 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba7e2c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 9c9b03e..737bdc2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -15,19 +15,7 @@
 Hadoop: Capacity Scheduler
 ==
 
-* [Purpose](#Purpose)
-* [Overview](#Overview)
-* [Features](#Features)
-* [Configuration](#Configuration)
-* [Setting up `ResourceManager` to use 
`CapacityScheduler`](#Setting_up_ResourceManager_to_use_CapacityScheduler`)
-* [Setting up queues](#Setting_up_queues)
-* [Queue Properties](#Queue_Properties)
-* [Setup for application priority](#Setup_for_application_priority.)
-* [Capacity Scheduler container 
preemption](#Capacity_Scheduler_container_preemption)
-* [Configuring `ReservationSystem` with 
`CapacityScheduler`](#Configuring_ReservationSystem_with_CapacityScheduler)
-* [Other Properties](#Other_Properties)
-* [Reviewing the configuration of the 
CapacityScheduler](#Reviewing_the_configuration_of_the_CapacityScheduler)
-* [Changing Queue Configuration](#Changing_Queue_Configuration)
+
 
 Purpose
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba7e2c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainerExecutor.md.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainerExecutor.md.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainerExecutor.md.vm
index 1fa94ca..cbf636e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainerExecutor.md.vm
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainerExecutor.md.vm
@@ -15,12 +15,7 @@
 Docker Container Executor
 =
 
-* [Overview](#Overview)
-* [Cluster Configuration](#Cluster_Configuration)
-* [Tips for connecting to a secure docker 
repository](#Tips_for_connecting_to_a_secure_docker_repository)
-* [Job Configuration](#Job_Configuration)
-* [Docker Image Requirements](#Docker_Image_Requirements)
-* [Working example of yarn launched docker 
containers](#Working_example_of_yarn_launched_docker_containers)
+
 
 DEPRECATED
 --


hadoop git commit: YARN-4753. Use doxia macro to generate in-page TOC of YARN site documentation. (iwasakims)

2017-02-14 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk 353a9b2d9 -> fbc0c2bd7


YARN-4753. Use doxia macro to generate in-page TOC of YARN site documentation. 
(iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbc0c2bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbc0c2bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbc0c2bd

Branch: refs/heads/trunk
Commit: fbc0c2bd763e3a3aad914eb9d60b05ad4ab2825f
Parents: 353a9b2
Author: Masatake Iwasaki 
Authored: Wed Feb 15 13:09:10 2017 +0900
Committer: Masatake Iwasaki 
Committed: Wed Feb 15 13:09:10 2017 +0900

--
 hadoop-project/src/site/site.xml|   2 +-
 .../src/site/markdown/CapacityScheduler.md  |  14 +--
 .../src/site/markdown/DockerContainers.md   |   8 +-
 .../src/site/markdown/FairScheduler.md  |  14 +--
 .../src/site/markdown/NodeLabel.md  |  14 +--
 .../src/site/markdown/NodeManager.md|   8 +-
 .../src/site/markdown/NodeManagerCgroups.md |   3 +-
 .../src/site/markdown/NodeManagerRest.md|   8 +-
 .../site/markdown/OpportunisticContainers.md|  17 +--
 .../src/site/markdown/ReservationSystem.md  |   6 +-
 .../src/site/markdown/ResourceManagerHA.md  |  10 +-
 .../src/site/markdown/ResourceManagerRest.md|  29 +
 .../src/site/markdown/ResourceManagerRestart.md |  13 +--
 .../src/site/markdown/SecureContainer.md|   2 +-
 .../src/site/markdown/TimelineServer.md |  16 +--
 .../src/site/markdown/TimelineServiceV2.md  | 110 ---
 .../src/site/markdown/WebApplicationProxy.md|   7 +-
 .../src/site/markdown/WebServicesIntro.md   |  13 +--
 .../site/markdown/WritingYarnApplications.md|  14 +--
 .../site/markdown/YarnApplicationSecurity.md|   2 +
 .../src/site/markdown/YarnCommands.md   |  26 +
 21 files changed, 69 insertions(+), 267 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0c2bd/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 618ad4c..ae3aef5 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -145,7 +145,7 @@
   
   
   
-  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0c2bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 9c9b03e..737bdc2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -15,19 +15,7 @@
 Hadoop: Capacity Scheduler
 ==
 
-* [Purpose](#Purpose)
-* [Overview](#Overview)
-* [Features](#Features)
-* [Configuration](#Configuration)
-* [Setting up `ResourceManager` to use 
`CapacityScheduler`](#Setting_up_ResourceManager_to_use_CapacityScheduler`)
-* [Setting up queues](#Setting_up_queues)
-* [Queue Properties](#Queue_Properties)
-* [Setup for application priority](#Setup_for_application_priority.)
-* [Capacity Scheduler container 
preemption](#Capacity_Scheduler_container_preemption)
-* [Configuring `ReservationSystem` with 
`CapacityScheduler`](#Configuring_ReservationSystem_with_CapacityScheduler)
-* [Other Properties](#Other_Properties)
-* [Reviewing the configuration of the 
CapacityScheduler](#Reviewing_the_configuration_of_the_CapacityScheduler)
-* [Changing Queue Configuration](#Changing_Queue_Configuration)
+
 
 Purpose
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc0c2bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index b74fa7b..e66d079 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -15,13 +15,7 @@
 Launching Applications Using Docker Containers
 ==
 
-* [Overview](#Overview)
-* [Cluster Configuration](#Cluster_Configuration)
-* [Docker Image 

[26/50] [abbrv] hadoop git commit: YARN-5676. Add a HashBasedRouterPolicy, and small policies and test refactoring. (Carlo Curino via Subru).

2017-02-14 Thread subru
YARN-5676. Add a HashBasedRouterPolicy, and small policies and test 
refactoring. (Carlo Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fc338ed0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fc338ed0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fc338ed0

Branch: refs/heads/YARN-2915
Commit: fc338ed07ca40b7aba5591d8c2798b8b7a32e8b1
Parents: fbbeec4
Author: Subru Krishnan 
Authored: Tue Nov 22 15:02:22 2016 -0800
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +-
 .../policies/AbstractPolicyManager.java | 175 -
 .../policies/FederationPolicyManager.java   | 117 
 .../PriorityBroadcastPolicyManager.java |  66 ---
 .../federation/policies/RouterPolicyFacade.java |   1 +
 .../policies/UniformBroadcastPolicyManager.java |  56 --
 .../policies/WeightedLocalityPolicyManager.java |  67 ---
 .../policies/manager/AbstractPolicyManager.java | 190 +++
 .../manager/FederationPolicyManager.java| 118 
 .../manager/HashBroadcastPolicyManager.java |  38 
 .../manager/PriorityBroadcastPolicyManager.java |  66 +++
 .../manager/UniformBroadcastPolicyManager.java  |  44 +
 .../manager/WeightedLocalityPolicyManager.java  |  67 +++
 .../policies/manager/package-info.java  |  19 ++
 .../policies/router/AbstractRouterPolicy.java   |  19 ++
 .../policies/router/HashBasedRouterPolicy.java  |  81 
 .../policies/router/LoadBasedRouterPolicy.java  |   3 +
 .../policies/router/PriorityRouterPolicy.java   |   3 +
 .../router/UniformRandomRouterPolicy.java   |  10 +-
 .../router/WeightedRandomRouterPolicy.java  |   3 +
 .../policies/BaseFederationPoliciesTest.java|  17 +-
 .../policies/BasePolicyManagerTest.java | 108 ---
 ...ionPolicyInitializationContextValidator.java |   1 +
 .../TestPriorityBroadcastPolicyManager.java |  72 ---
 .../policies/TestRouterPolicyFacade.java|   2 +
 .../TestUniformBroadcastPolicyManager.java  |  40 
 .../TestWeightedLocalityPolicyManager.java  |  79 
 .../policies/manager/BasePolicyManagerTest.java | 104 ++
 .../TestHashBasedBroadcastPolicyManager.java|  40 
 .../TestPriorityBroadcastPolicyManager.java |  72 +++
 .../TestUniformBroadcastPolicyManager.java  |  40 
 .../TestWeightedLocalityPolicyManager.java  |  79 
 .../policies/router/BaseRouterPoliciesTest.java |  51 +
 .../router/TestHashBasedRouterPolicy.java   |  83 
 .../router/TestLoadBasedRouterPolicy.java   |   3 +-
 .../router/TestPriorityRouterPolicy.java|   3 +-
 .../router/TestUniformRandomRouterPolicy.java   |   3 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  15 +-
 38 files changed, 1160 insertions(+), 798 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc338ed0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index aca1177..1fbaa8c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2526,7 +2526,8 @@ public class YarnConfiguration extends Configuration {
   + "policy-manager";
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER = "org.apache"
-  + 
".hadoop.yarn.server.federation.policies.UniformBroadcastPolicyManager";
+  + ".hadoop.yarn.server.federation.policies"
+  + ".manager.UniformBroadcastPolicyManager";
 
   public static final String FEDERATION_POLICY_MANAGER_PARAMS =
   FEDERATION_PREFIX + "policy-manager-params";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc338ed0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
 

[08/50] [abbrv] hadoop git commit: HADOOP-13233. help of stat is confusing. Contributed by Attila Bukor.

2017-02-14 Thread subru
HADOOP-13233. help of stat is confusing. Contributed by Attila Bukor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc45da79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc45da79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc45da79

Branch: refs/heads/YARN-2915
Commit: cc45da79fda7dfba2795ac397d62f40a858dcdd9
Parents: 464ff47
Author: Wei-Chiu Chuang 
Authored: Mon Feb 13 10:14:45 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Mon Feb 13 10:14:45 2017 -0800

--
 .../src/main/java/org/apache/hadoop/fs/shell/Stat.java   | 4 ++--
 .../hadoop-common/src/site/markdown/FileSystemShell.md   | 2 +-
 .../hadoop-common/src/test/resources/testConf.xml| 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc45da79/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
index 42f7843..cf8270e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.fs.FileStatus;
  * Format sequences:
  *   %a: Permissions in octal
  *   %A: Permissions in symbolic style
- *   %b: Size of file in blocks
+ *   %b: Size of file in bytes
  *   %F: Type
  *   %g: Group name of owner
  *   %n: Filename
@@ -60,7 +60,7 @@ class Stat extends FsCommand {
 "Print statistics about the file/directory at " + NEWLINE +
 "in the specified format. Format accepts permissions in" + NEWLINE +
 "octal (%a) and symbolic (%A), filesize in" + NEWLINE +
-"blocks (%b), type (%F), group name of owner (%g)," + NEWLINE +
+"bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
 "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
 "of owner (%u), modification date (%y, %Y)." + NEWLINE +
"%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc45da79/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
index 43fc28b..42fddc9 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
@@ -667,7 +667,7 @@ stat
 
 Usage: `hadoop fs -stat [format]  ...`
 
-Print statistics about the file/directory at \ in the specified format. 
Format accepts permissions in octal (%a) and symbolic (%A), filesize in blocks 
(%b), type (%F), group name of owner (%g), name (%n), block size (%o), 
replication (%r), user name of owner(%u), and modification date (%y, %Y). %y 
shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 
1, 1970 UTC. If the format is not specified, %y is used by default.
+Print statistics about the file/directory at \ in the specified format. 
Format accepts permissions in octal (%a) and symbolic (%A), filesize in bytes 
(%b), type (%F), group name of owner (%g), name (%n), block size (%o), 
replication (%r), user name of owner(%u), and modification date (%y, %Y). %y 
shows UTC date as "yyyy-MM-dd HH:mm:ss" and %Y shows milliseconds since January 
1, 1970 UTC. If the format is not specified, %y is used by default.
 
 Example:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc45da79/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml 
b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index d285f33..112aea0 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -867,7 +867,7 @@
 
 
   RegexpComparator
-  ^( |\t)*blocks \(%b\), type \(%F\), group name of 
owner \(%g\),( )*
+  ^( |\t)*bytes \(%b\), type \(%F\), group name of 
owner \(%g\),( )*
 
 
   RegexpComparator


-
To unsubscribe, e-mail: 

[14/50] [abbrv] hadoop git commit: YARN-5966. AMRMClient changes to support ExecutionType update. (asuresh)

2017-02-14 Thread subru
YARN-5966. AMRMClient changes to support ExecutionType update. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aaf106fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aaf106fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aaf106fd

Branch: refs/heads/YARN-2915
Commit: aaf106fde35ec97e2e2ea4d7a67434038c4273ac
Parents: 4164a20
Author: Arun Suresh 
Authored: Tue Feb 14 06:08:27 2017 -0800
Committer: Arun Suresh 
Committed: Tue Feb 14 06:09:10 2017 -0800

--
 .../yarn/api/records/UpdateContainerError.java  |  19 +-
 .../src/main/proto/yarn_service_protos.proto|   1 +
 .../hadoop/yarn/client/api/AMRMClient.java  |  33 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  33 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   7 +-
 .../yarn/client/api/impl/AMRMClientImpl.java| 111 +++--
 .../yarn/client/api/impl/TestAMRMClient.java|  60 ++-
 .../api/impl/TestAMRMClientOnRMRestart.java |   8 +-
 .../TestOpportunisticContainerAllocation.java   | 400 +--
 .../impl/pb/UpdateContainerErrorPBImpl.java |  16 +
 .../server/resourcemanager/RMServerUtils.java   |  14 +-
 ...pportunisticContainerAllocatorAMService.java |   5 +-
 .../capacity/TestIncreaseAllocationExpirer.java |   4 +-
 13 files changed, 587 insertions(+), 124 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
index e7458cf..4d184cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
@@ -59,6 +59,22 @@ public abstract class UpdateContainerError {
   public abstract void setReason(String reason);
 
   /**
+   * Get current container version.
+   * @return Current container Version.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract int getCurrentContainerVersion();
+
+  /**
+   * Set current container version.
+   * @param currentVersion Current container version.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setCurrentContainerVersion(int currentVersion);
+
+  /**
* Get the {@code UpdateContainerRequest} that was not satisfiable.
* @return UpdateContainerRequest
*/
@@ -89,6 +105,7 @@ public abstract class UpdateContainerError {
   @Override
   public String toString() {
 return "UpdateContainerError{reason=" + getReason() + ", "
++ "currentVersion=" + getCurrentContainerVersion() + ", "
 + "req=" + getUpdateContainerRequest() + "}";
   }
 
@@ -120,6 +137,6 @@ public abstract class UpdateContainerError {
 } else if (!req.equals(other.getUpdateContainerRequest())) {
   return false;
 }
-return true;
+return getCurrentContainerVersion() == other.getCurrentContainerVersion();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index df3c852..c6647c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -78,6 +78,7 @@ message UpdateContainerRequestProto {
 message UpdateContainerErrorProto {
   optional string reason = 1;
   optional UpdateContainerRequestProto update_request = 2;
+  optional int32 current_container_version = 3;
 }
 
 message AllocateRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
 

[37/50] [abbrv] hadoop git commit: YARN-5634. Simplify initialization/use of RouterPolicy via a RouterPolicyFacade. (Carlo Curino via Subru).

2017-02-14 Thread subru
YARN-5634. Simplify initialization/use of RouterPolicy via a 
RouterPolicyFacade. (Carlo Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbbeec41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbbeec41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbbeec41

Branch: refs/heads/YARN-2915
Commit: fbbeec418a898e35d24672ec7a1d75162e34b901
Parents: 112b99b
Author: Subru Krishnan 
Authored: Wed Nov 16 19:39:25 2016 -0800
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../dev-support/findbugs-exclude.xml|   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../yarn/conf/TestYarnConfigurationFields.java  |  12 +
 ...ionPolicyInitializationContextValidator.java |   2 +-
 .../PriorityBroadcastPolicyManager.java |  66 +
 .../federation/policies/RouterPolicyFacade.java | 266 +++
 .../policies/dao/WeightedPolicyInfo.java|   6 +-
 .../utils/FederationStateStoreFacade.java   |  16 +-
 .../TestPriorityBroadcastPolicyManager.java |  72 +
 .../policies/TestRouterPolicyFacade.java| 220 +++
 .../utils/FederationStateStoreTestUtil.java |  22 +-
 11 files changed, 693 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbbeec41/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index bbd03a9..ee51094 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -310,6 +310,15 @@
 
   
 
+  
+
+
+  
+  
+
+
+  
+
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbbeec41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 58b2f44..aca1177 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2520,6 +2520,19 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 
+  public static final String DEFAULT_FEDERATION_POLICY_KEY = "*";
+
+  public static final String FEDERATION_POLICY_MANAGER = FEDERATION_PREFIX
+  + "policy-manager";
+
+  public static final String DEFAULT_FEDERATION_POLICY_MANAGER = "org.apache"
+  + 
".hadoop.yarn.server.federation.policies.UniformBroadcastPolicyManager";
+
+  public static final String FEDERATION_POLICY_MANAGER_PARAMS =
+  FEDERATION_PREFIX + "policy-manager-params";
+
+  public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = "";
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbbeec41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 3f3a06c..6e33c0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -78,6 +78,18 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.RM_EPOCH);
 
+// Federation policies configs to be ignored
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_POLICY_MANAGER);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_POLICY_MANAGER_PARAMS);
+configurationPropsToSkipCompare
+

[30/50] [abbrv] hadoop git commit: YARN-3662. Federation Membership State Store internal APIs.

2017-02-14 Thread subru
YARN-3662. Federation Membership State Store internal APIs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00b32dff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00b32dff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00b32dff

Branch: refs/heads/YARN-2915
Commit: 00b32dff4d56b3d867fa138e382c7a7a5985adbe
Parents: 4aca205
Author: Subru Krishnan 
Authored: Fri Jul 29 16:53:40 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../hadoop-yarn-server-common/pom.xml   |   8 +
 .../store/FederationMembershipStateStore.java   | 126 +
 .../server/federation/store/package-info.java   |  17 ++
 .../store/records/GetSubClusterInfoRequest.java |  62 +
 .../records/GetSubClusterInfoResponse.java  |  62 +
 .../records/GetSubClustersInfoRequest.java  |  66 +
 .../records/GetSubClustersInfoResponse.java |  66 +
 .../records/SubClusterDeregisterRequest.java|  89 +++
 .../records/SubClusterDeregisterResponse.java   |  42 +++
 .../records/SubClusterHeartbeatRequest.java | 149 +++
 .../records/SubClusterHeartbeatResponse.java|  45 
 .../federation/store/records/SubClusterId.java  | 100 +++
 .../store/records/SubClusterInfo.java   | 263 ++
 .../records/SubClusterRegisterRequest.java  |  74 +
 .../records/SubClusterRegisterResponse.java |  44 +++
 .../store/records/SubClusterState.java  |  60 +
 .../impl/pb/GetSubClusterInfoRequestPBImpl.java | 125 +
 .../pb/GetSubClusterInfoResponsePBImpl.java | 134 ++
 .../pb/GetSubClustersInfoRequestPBImpl.java | 108 
 .../pb/GetSubClustersInfoResponsePBImpl.java| 184 +
 .../pb/SubClusterDeregisterRequestPBImpl.java   | 156 +++
 .../pb/SubClusterDeregisterResponsePBImpl.java  |  77 ++
 .../pb/SubClusterHeartbeatRequestPBImpl.java| 192 +
 .../pb/SubClusterHeartbeatResponsePBImpl.java   |  77 ++
 .../records/impl/pb/SubClusterIdPBImpl.java |  75 ++
 .../records/impl/pb/SubClusterInfoPBImpl.java   | 267 +++
 .../pb/SubClusterRegisterRequestPBImpl.java | 134 ++
 .../pb/SubClusterRegisterResponsePBImpl.java|  77 ++
 .../store/records/impl/pb/package-info.java |  17 ++
 .../federation/store/records/package-info.java  |  17 ++
 .../proto/yarn_server_federation_protos.proto   |  93 +++
 .../records/TestFederationProtocolRecords.java  | 133 +
 32 files changed, 3139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00b32dff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index fc23af8..9cc3cae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -58,6 +58,13 @@
   org.apache.hadoop
   hadoop-yarn-common
 
+
+
+  org.apache.hadoop
+  hadoop-yarn-common
+  test-jar
+  test
+
 
 
   com.google.guava
@@ -146,6 +153,7 @@
   yarn_server_common_protos.proto
   yarn_server_common_service_protos.proto
   yarn_server_common_service_protos.proto
+  yarn_server_federation_protos.proto
   ResourceTracker.proto
   SCMUploader.proto
   collectornodemanager_protocol.proto

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00b32dff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
new file mode 100644
index 000..378eadc
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationMembershipStateStore.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or 

[05/50] [abbrv] hadoop git commit: HADOOP-14069. AliyunOSS: listStatus returns wrong file info. Contributed by Fei Hui

2017-02-14 Thread subru
HADOOP-14069. AliyunOSS: listStatus returns wrong file info. Contributed by Fei 
Hui


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01be4503
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01be4503
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01be4503

Branch: refs/heads/YARN-2915
Commit: 01be4503c3b053d2cff0b179774dabfd267877db
Parents: 839b690
Author: Kai Zheng 
Authored: Mon Feb 13 13:18:20 2017 +0800
Committer: Kai Zheng 
Committed: Mon Feb 13 13:18:20 2017 +0800

--
 .../org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java  | 2 +-
 .../hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java | 7 +++
 2 files changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01be4503/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
index 3426319..0491087 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java
@@ -352,7 +352,7 @@ public class AliyunOSSFileSystem extends FileSystem {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Adding: rd: " + keyPath);
 }
-result.add(new FileStatus(0, true, 1, 0, 0, keyPath));
+result.add(getFileStatus(keyPath));
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01be4503/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
index ad8ef6e..3ebf507 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -206,6 +207,12 @@ public class TestAliyunOSSFileSystemContract
 assertTrue("Should be directory",
 this.fs.getFileStatus(dirPath).isDirectory());
 assertFalse("Should not be file", this.fs.getFileStatus(dirPath).isFile());
+
+Path parentPath = this.path("/test/oss");
+for (FileStatus fileStatus: fs.listStatus(parentPath)) {
+  assertTrue("file and directory should be new",
+  fileStatus.getModificationTime() > 0L);
+}
   }
 
   public void testMkdirsForExistingFile() throws Exception {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre Villard via Yongjun Zhang)

2017-02-14 Thread subru
HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre 
Villard via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e53f2b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e53f2b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e53f2b9

Branch: refs/heads/YARN-2915
Commit: 8e53f2b9b08560bf4f8e81e697063277dbdc68f9
Parents: 652679a
Author: Yongjun Zhang 
Authored: Tue Feb 14 12:47:06 2017 -0800
Committer: Yongjun Zhang 
Committed: Tue Feb 14 13:40:53 2017 -0800

--
 .../hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java   | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e53f2b9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index 095f41d..f8c15fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -108,6 +110,10 @@ public class WebHdfsHandler extends 
SimpleChannelInboundHandler {
 throws IOException {
 this.conf = conf;
 this.confForCreate = confForCreate;
+/** set user pattern based on configuration file */
+UserParam.setUserPattern(
+conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[48/50] [abbrv] hadoop git commit: YARN-5300. Exclude generated federation protobuf sources from YARN Javadoc/findbugs build

2017-02-14 Thread subru
YARN-5300. Exclude generated federation protobuf sources from YARN 
Javadoc/findbugs build


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4aca205b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4aca205b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4aca205b

Branch: refs/heads/YARN-2915
Commit: 4aca205bde41c526c9ef7fd8c2ecf7399fd02b03
Parents: 353a9b2
Author: Subru Krishnan 
Authored: Tue Jul 19 15:08:25 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml | 3 +++
 hadoop-yarn-project/hadoop-yarn/pom.xml  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aca205b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index c090749..2f5451d 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -21,6 +21,9 @@
 
   
   
+
+  
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aca205b/hadoop-yarn-project/hadoop-yarn/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/pom.xml
index c43588a..99b8b5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/pom.xml
@@ -75,7 +75,7 @@
 org.apache.maven.plugins
 maven-javadoc-plugin
 
-  
org.apache.hadoop.yarn.proto
+  
org.apache.hadoop.yarn.proto:org.apache.hadoop.yarn.federation.proto
 
   
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: YARN-5407. In-memory based implementation of the FederationApplicationStateStore/FederationPolicyStateStore. (Ellen Hui via Subru)

2017-02-14 Thread subru
YARN-5407. In-memory based implementation of the 
FederationApplicationStateStore/FederationPolicyStateStore. (Ellen Hui via 
Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed2c6c79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed2c6c79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed2c6c79

Branch: refs/heads/YARN-2915
Commit: ed2c6c791ed1190e9ef4a8fc544f17af3802f2a6
Parents: 630cb6c
Author: Subru Krishnan 
Authored: Tue Aug 9 16:07:55 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../store/impl/MemoryFederationStateStore.java  | 158 +++-
 ...SubClusterPoliciesConfigurationsRequest.java |   2 +-
 ...ubClusterPoliciesConfigurationsResponse.java |   2 +-
 ...GetSubClusterPolicyConfigurationRequest.java |   3 +-
 ...etSubClusterPolicyConfigurationResponse.java |   2 +-
 ...SetSubClusterPolicyConfigurationRequest.java |  20 +-
 ...etSubClusterPolicyConfigurationResponse.java |   2 +-
 .../records/SubClusterPolicyConfiguration.java  |  27 +-
 ...tApplicationHomeSubClusterRequestPBImpl.java |   4 +
 ...ClusterPolicyConfigurationRequestPBImpl.java |  17 -
 .../pb/SubClusterPolicyConfigurationPBImpl.java |  17 +
 .../proto/yarn_server_federation_protos.proto   |   8 +-
 .../impl/FederationStateStoreBaseTest.java  | 367 ++-
 .../impl/TestMemoryFederationStateStore.java|   4 +-
 14 files changed, 558 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed2c6c79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
index cea4ac2..a540dff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -20,35 +20,72 @@ package org.apache.hadoop.yarn.server.federation.store.impl;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
-import 
org.apache.hadoop.yarn.server.federation.store.FederationMembershipStateStore;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationsHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest;
+import 

[47/50] [abbrv] hadoop git commit: YARN-3671. Integrate Federation services with ResourceManager. Contributed by Subru Krishnan

2017-02-14 Thread subru
YARN-3671. Integrate Federation services with ResourceManager. Contributed by 
Subru Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ef27f1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ef27f1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ef27f1b

Branch: refs/heads/YARN-2915
Commit: 5ef27f1b0f2211a222b98bbf2c8d7212c76bb1fc
Parents: e637a07
Author: Jian He 
Authored: Tue Aug 30 12:20:52 2016 +0800
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  11 +-
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +-
 .../failover/FederationProxyProviderUtil.java   |   2 +-
 .../FederationRMFailoverProxyProvider.java  |   4 +-
 ...ationMembershipStateStoreInputValidator.java |   7 +-
 .../TestFederationStateStoreInputValidator.java |  10 +-
 .../server/resourcemanager/ResourceManager.java |  26 ++
 .../FederationStateStoreHeartbeat.java  | 108 +++
 .../federation/FederationStateStoreService.java | 304 +++
 .../federation/package-info.java|  17 ++
 .../webapp/dao/ClusterMetricsInfo.java  |   5 +-
 .../TestFederationRMStateStoreService.java  | 170 +++
 12 files changed, 648 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ef27f1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a2c42fd..f3062e2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2494,9 +2494,6 @@ public class YarnConfiguration extends Configuration {
   FEDERATION_PREFIX + "failover.enabled";
   public static final boolean DEFAULT_FEDERATION_FAILOVER_ENABLED = true;
 
-  public static final String FEDERATION_SUBCLUSTER_ID =
-  FEDERATION_PREFIX + "sub-cluster.id";
-
   public static final String FEDERATION_STATESTORE_CLIENT_CLASS =
   FEDERATION_PREFIX + "state-store.class";
 
@@ -2509,6 +2506,14 @@ public class YarnConfiguration extends Configuration {
   // 5 minutes
   public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
 
+  public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
+  FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
+
+  // 5 minutes
+  public static final int
+  DEFAULT_FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
+  5 * 60;
+
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ef27f1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index c4d8f38..5e0876f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -72,9 +72,9 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS);
 configurationPropsToSkipCompare
-.add(YarnConfiguration.FEDERATION_SUBCLUSTER_ID);
-configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
 
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ef27f1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java

[17/50] [abbrv] hadoop git commit: HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML processor. Contributed by Wei-Chiu Chuang.

2017-02-14 Thread subru
HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML 
processor. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cf59937
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cf59937
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cf59937

Branch: refs/heads/YARN-2915
Commit: 0cf5993712a01993bd701bd9664e6af284378b55
Parents: 1fa084c
Author: Wei-Chiu Chuang 
Authored: Tue Feb 14 08:59:12 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Tue Feb 14 09:11:55 2017 -0800

--
 .../tools/offlineImageViewer/TestOfflineImageViewer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf59937/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 740a8ab..dacbb85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -69,6 +69,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -158,6 +160,15 @@ public class TestOfflineImageViewer {
   hdfs.mkdirs(invalidXMLDir);
   dirCount++;
 
+  //Create a directory with sticky bits
+  Path stickyBitDir = new Path("/stickyBit");
+  hdfs.mkdirs(stickyBitDir);
+  hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL,
+  FsAction.ALL, FsAction.ALL, true));
+  dirCount++;
+  writtenFiles.put(stickyBitDir.toString(),
+  hdfs.getFileStatus(stickyBitDir));
+
   // Get delegation tokens so we log the delegation token op
   Token[] delegationTokens = hdfs
   .addDelegationTokens(TEST_RENEWER, null);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[02/50] [abbrv] hadoop git commit: HDFS-11403. ZooKeeper ACLs on NN HA enabled clusters should be handled consistently. Contributed by Hanisha Koneru.

2017-02-14 Thread subru
HDFS-11403. ZooKeeper ACLs on NN HA enabled clusters should be handled 
consistently. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0aacd8fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0aacd8fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0aacd8fd

Branch: refs/heads/YARN-2915
Commit: 0aacd8fd2530f9f5febbe81ec05cd958cc0c3e2c
Parents: 07a5184
Author: Arpit Agarwal 
Authored: Sat Feb 11 01:17:56 2017 -0800
Committer: Arpit Agarwal 
Committed: Sat Feb 11 01:17:56 2017 -0800

--
 .../apache/hadoop/ha/ActiveStandbyElector.java  | 37 ++--
 .../hadoop/ha/TestActiveStandbyElector.java | 31 
 2 files changed, 65 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aacd8fd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index 9a90ccf..db853f4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -346,8 +346,13 @@ public class ActiveStandbyElector implements StatCallback, 
StringCallback {
 createWithRetries(prefixPath, new byte[]{}, zkAcl, 
CreateMode.PERSISTENT);
   } catch (KeeperException e) {
 if (isNodeExists(e.code())) {
-  // This is OK - just ensuring existence.
-  continue;
+  // Set ACLs for parent node, if they do not exist or are different
+  try {
+setAclsWithRetries(prefixPath);
+  } catch (KeeperException e1) {
+throw new IOException("Couldn't set ACLs on parent ZNode: " +
+prefixPath, e1);
+  }
 } else {
   throw new IOException("Couldn't create " + prefixPath, e);
 }
@@ -1066,14 +1071,36 @@ public class ActiveStandbyElector implements 
StatCallback, StringCallback {
 });
   }
 
+  private void setAclsWithRetries(final String path)
+  throws KeeperException, InterruptedException {
+Stat stat = new Stat();
+zkDoWithRetries(new ZKAction() {
+  @Override
+  public Void run() throws KeeperException, InterruptedException {
+List acl = zkClient.getACL(path, stat);
+if (acl == null || !acl.containsAll(zkAcl) ||
+!zkAcl.containsAll(acl)) {
+  zkClient.setACL(path, zkAcl, stat.getVersion());
+}
+return null;
+  }
+}, Code.BADVERSION);
+  }
+
   private  T zkDoWithRetries(ZKAction action) throws KeeperException,
   InterruptedException {
+return zkDoWithRetries(action, null);
+  }
+
+  private  T zkDoWithRetries(ZKAction action, Code retryCode)
+  throws KeeperException, InterruptedException {
 int retry = 0;
 while (true) {
   try {
 return action.run();
   } catch (KeeperException ke) {
-if (shouldRetry(ke.code()) && ++retry < maxRetryNum) {
+if ((shouldRetry(ke.code()) || shouldRetry(ke.code(), retryCode))
+&& ++retry < maxRetryNum) {
   continue;
 }
 throw ke;
@@ -1189,6 +1216,10 @@ public class ActiveStandbyElector implements 
StatCallback, StringCallback {
   private static boolean shouldRetry(Code code) {
 return code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT;
   }
+
+  private static boolean shouldRetry(Code code, Code retryIfCode) {
+return (retryIfCode == null ? false : retryIfCode == code);
+  }
   
   @Override
   public String toString() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0aacd8fd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
index 8c6f7a3..ca3389f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java
@@ -715,6 +715,37 @@ public class TestActiveStandbyElector {
   }
 }
   }
+
+  /**
+   * Test that ACLs are set on parent zNode even if the node already exists.
+   */
+  @Test
+  public void 

[39/50] [abbrv] hadoop git commit: YARN-5325. Stateless AMRMProxy policies implementation. (Carlo Curino via Subru).

2017-02-14 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1764cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
index e57709f..5de749f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.yarn.server.federation.policies.router;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import java.util.Map;
+
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
@@ -30,34 +30,27 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 
-import java.util.Map;
-
 /**
  * This implements a simple load-balancing policy. The policy "weights" are
 * binary 0/1 values that enable/disable each sub-cluster, and the policy picks
  * the sub-cluster with the least load to forward this application.
  */
-public class LoadBasedRouterPolicy
-extends BaseWeightedRouterPolicy {
-
-  private static final Log LOG =
-  LogFactory.getLog(LoadBasedRouterPolicy.class);
+public class LoadBasedRouterPolicy extends AbstractRouterPolicy {
 
   @Override
-  public void reinitialize(FederationPolicyInitializationContext
-  federationPolicyContext)
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
   throws FederationPolicyInitializationException {
 
 // remember old policyInfo
 WeightedPolicyInfo tempPolicy = getPolicyInfo();
 
-//attempt new initialization
-super.reinitialize(federationPolicyContext);
+// attempt new initialization
+super.reinitialize(policyContext);
 
-//check extra constraints
+// check extra constraints
 for (Float weight : getPolicyInfo().getRouterPolicyWeights().values()) {
   if (weight != 0 && weight != 1) {
-//reset to old policyInfo if check fails
+// reset to old policyInfo if check fails
 setPolicyInfo(tempPolicy);
 throw new FederationPolicyInitializationException(
 this.getClass().getCanonicalName()
@@ -69,18 +62,16 @@ public class LoadBasedRouterPolicy
 
   @Override
   public SubClusterId getHomeSubcluster(
-  ApplicationSubmissionContext appSubmissionContext)
-  throws YarnException {
+  ApplicationSubmissionContext appSubmissionContext) throws YarnException {
 
 Map activeSubclusters =
 getActiveSubclusters();
 
-Map weights = getPolicyInfo()
-.getRouterPolicyWeights();
+Map weights =
+getPolicyInfo().getRouterPolicyWeights();
 SubClusterIdInfo chosen = null;
 long currBestMem = -1;
-for (Map.Entry entry :
-activeSubclusters
+for (Map.Entry entry : activeSubclusters
 .entrySet()) {
   SubClusterIdInfo id = new SubClusterIdInfo(entry.getKey());
   if (weights.containsKey(id) && weights.get(id) > 0) {
@@ -95,8 +86,7 @@ public class LoadBasedRouterPolicy
 return chosen.toId();
   }
 
-  private long getAvailableMemory(SubClusterInfo value)
-  throws YarnException {
+  private long getAvailableMemory(SubClusterInfo value) throws YarnException {
 try {
   long mem = -1;
   JSONObject obj = new JSONObject(value.getCapability());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1764cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
 

[40/50] [abbrv] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2017-02-14 Thread subru
YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed1764cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed1764cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed1764cb

Branch: refs/heads/YARN-2915
Commit: ed1764cb98235db2fdd35a482f4cde3266e88f31
Parents: 6cf5730
Author: Subru Krishnan 
Authored: Thu Oct 13 17:59:13 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../AbstractConfigurableFederationPolicy.java   | 155 +
 .../policies/ConfigurableFederationPolicy.java  |   9 +-
 .../FederationPolicyInitializationContext.java  |  37 +-
 ...ionPolicyInitializationContextValidator.java |  28 +-
 .../policies/FederationPolicyManager.java   |  59 +-
 .../amrmproxy/AbstractAMRMProxyPolicy.java  |  47 ++
 .../amrmproxy/BroadcastAMRMProxyPolicy.java |  85 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  25 +-
 .../LocalityMulticastAMRMProxyPolicy.java   | 583 +++
 .../policies/amrmproxy/package-info.java|   1 -
 .../policies/dao/WeightedPolicyInfo.java| 180 +++---
 .../federation/policies/dao/package-info.java   |   1 -
 .../policies/exceptions/package-info.java   |   1 -
 .../federation/policies/package-info.java   |   1 -
 .../policies/router/AbstractRouterPolicy.java   |  47 ++
 .../router/BaseWeightedRouterPolicy.java| 150 -
 .../policies/router/FederationRouterPolicy.java |   5 +-
 .../policies/router/LoadBasedRouterPolicy.java  |  36 +-
 .../policies/router/PriorityRouterPolicy.java   |  19 +-
 .../router/UniformRandomRouterPolicy.java   |  28 +-
 .../router/WeightedRandomRouterPolicy.java  |  32 +-
 .../policies/router/package-info.java   |   1 -
 .../resolver/AbstractSubClusterResolver.java|   4 +-
 .../policies/BaseFederationPoliciesTest.java|  28 +-
 ...ionPolicyInitializationContextValidator.java |  25 +-
 .../TestBroadcastAMRMProxyFederationPolicy.java | 112 
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 566 ++
 .../router/TestLoadBasedRouterPolicy.java   |  18 +-
 .../router/TestPriorityRouterPolicy.java|  15 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  35 +-
 .../utils/FederationPoliciesTestUtil.java   |  64 ++
 .../src/test/resources/nodes|   6 +-
 32 files changed, 1950 insertions(+), 453 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed1764cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
new file mode 100644
index 000..4cb9bbe
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import java.util.Map;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.NoActiveSubclustersException;
+import 

[25/50] [abbrv] hadoop git commit: YARN-5676. Add a HashBasedRouterPolicy, and small policies and test refactoring. (Carlo Curino via Subru).

2017-02-14 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc338ed0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
index 4975a9f..5fa02d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.PriorityBroadcastPolicyManager;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager;
 import 
org.apache.hadoop.yarn.server.federation.policies.router.PriorityRouterPolicy;
 import 
org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy;
 import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc338ed0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
deleted file mode 100644
index 542a5ae..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.federation.policies;
-
-import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy;
-import 
org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy;
-import org.junit.Before;
-
-/**
- * Simple test of {@link UniformBroadcastPolicyManager}.
- */
-public class TestUniformBroadcastPolicyManager extends BasePolicyManagerTest {
-
-  @Before
-  public void setup() {
-//config policy
-wfp = new UniformBroadcastPolicyManager();
-wfp.setQueue("queue1");
-
-//set expected params that the base test class will use for tests
-expectedPolicyManager = UniformBroadcastPolicyManager.class;
-expectedAMRMProxyPolicy = BroadcastAMRMProxyPolicy.class;
-expectedRouterPolicy = UniformRandomRouterPolicy.class;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fc338ed0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestWeightedLocalityPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestWeightedLocalityPolicyManager.java
 

[22/50] [abbrv] hadoop git commit: YARN-5519. Add SubClusterId in AddApplicationHomeSubClusterResponse for Router Failover. (Ellen Hui via Subru)

2017-02-14 Thread subru
YARN-5519. Add SubClusterId in AddApplicationHomeSubClusterResponse for Router 
Failover. (Ellen Hui via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b0386e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b0386e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b0386e3

Branch: refs/heads/YARN-2915
Commit: 0b0386e3c156e345951c2e9deefc9eb459d687f0
Parents: ed2c6c7
Author: Subru Krishnan 
Authored: Mon Aug 15 14:47:02 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 ...ederationApplicationHomeSubClusterStore.java | 21 +++---
 .../store/impl/MemoryFederationStateStore.java  | 22 +++---
 .../AddApplicationHomeSubClusterResponse.java   | 29 ++--
 ...ApplicationHomeSubClusterResponsePBImpl.java | 39 +++
 .../proto/yarn_server_federation_protos.proto   |  1 +
 .../impl/FederationStateStoreBaseTest.java  | 71 +---
 6 files changed, 120 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b0386e3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
index 22bb88a..ace2457 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -51,15 +51,20 @@ public interface FederationApplicationHomeSubClusterStore {
   /**
* Register the home {@code SubClusterId} of the newly submitted
* {@code ApplicationId}. Currently response is empty if the operation was
-   * successful, if not an exception reporting reason for a failure.
+   * successful, if not an exception reporting reason for a failure. If a
+   * mapping for the application already existed, the {@code SubClusterId} in
+   * this response will return the existing mapping which might be different
+   * from that in the {@code AddApplicationHomeSubClusterRequest}.
*
* @param request the request to register a new application with its home
*  sub-cluster
-   * @return empty on successful registration of the application in the
-   * StateStore, if not an exception reporting reason for a failure
+   * @return upon successful registration of the application in the StateStore,
+   * {@code AddApplicationHomeSubClusterRequest} containing the home
+   * sub-cluster of the application. Otherwise, an exception reporting
+   * reason for a failure
* @throws YarnException if the request is invalid/fails
*/
-  AddApplicationHomeSubClusterResponse addApplicationHomeSubClusterMap(
+  AddApplicationHomeSubClusterResponse addApplicationHomeSubCluster(
   AddApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -73,7 +78,7 @@ public interface FederationApplicationHomeSubClusterStore {
* not an exception reporting reason for a failure
* @throws YarnException if the request is invalid/fails
*/
-  UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubClusterMap(
+  UpdateApplicationHomeSubClusterResponse updateApplicationHomeSubCluster(
   UpdateApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -85,7 +90,7 @@ public interface FederationApplicationHomeSubClusterStore {
* subcluster
* @throws YarnException if the request is invalid/fails
*/
-  GetApplicationHomeSubClusterResponse getApplicationHomeSubClusterMap(
+  GetApplicationHomeSubClusterResponse getApplicationHomeSubCluster(
   GetApplicationHomeSubClusterRequest request) throws YarnException;
 
   /**
@@ -96,7 +101,7 @@ public interface FederationApplicationHomeSubClusterStore {
* @return the mapping of all submitted application to it's home sub-cluster
* @throws YarnException if the request is invalid/fails
*/
-  GetApplicationsHomeSubClusterResponse getApplicationsHomeSubClusterMap(
+  GetApplicationsHomeSubClusterResponse getApplicationsHomeSubCluster(
   

[13/50] [abbrv] hadoop git commit: HDFS-11408. The config name of balance bandwidth is out of date. Contributed by Yiqun Lin.

2017-02-14 Thread subru
HDFS-11408. The config name of balance bandwidth is out of date. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4164a203
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4164a203
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4164a203

Branch: refs/heads/YARN-2915
Commit: 4164a2032a41e7318749efd0301751eb2b369cdc
Parents: 719df99
Author: Yiqun Lin 
Authored: Tue Feb 14 18:57:20 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 14 18:57:20 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 2 +-
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java| 2 +-
 .../main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java | 2 +-
 .../hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java | 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 +-
 .../hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java  | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md  | 2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4164a203/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7b6a4e5..e0ccd62 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2233,7 +2233,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 
   /**
* Requests the namenode to tell all datanodes to use a new, non-persistent
-   * bandwidth value for dfs.balance.bandwidthPerSec.
+   * bandwidth value for dfs.datanode.balance.bandwidthPerSec.
* See {@link ClientProtocol#setBalancerBandwidth(long)}
* for more details.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4164a203/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index e9475d8..30dcfa4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1599,7 +1599,7 @@ public class DistributedFileSystem extends FileSystem {
 
   /**
* Requests the namenode to tell all datanodes to use a new, non-persistent
-   * bandwidth value for dfs.balance.bandwidthPerSec.
+   * bandwidth value for dfs.datanode.balance.bandwidthPerSec.
* The bandwidth parameter is the max number of bytes per second of network
* bandwidth to be used by a datanode during balancing.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4164a203/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 407621b..eaebd6f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -938,7 +938,7 @@ public interface ClientProtocol {
 
   /**
* Tell all datanodes to use a new, non-persistent bandwidth value for
-   * dfs.balance.bandwidthPerSec.
+   * dfs.datanode.balance.bandwidthPerSec.
*
   * @param bandwidth Balancer bandwidth in bytes per second for this datanode.
* @throws IOException

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4164a203/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
--
diff --git 

[45/50] [abbrv] hadoop git commit: YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni Matteo Fumarola via Subru)

2017-02-14 Thread subru
YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni 
Matteo Fumarola via Subru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6028f167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6028f167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6028f167

Branch: refs/heads/YARN-2915
Commit: 6028f167707e456f0db2f1f6aabff0d369627c3b
Parents: 9ea752f
Author: Subru Krishnan 
Authored: Wed Aug 17 12:07:06 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../store/impl/MemoryFederationStateStore.java  |   30 +
 ...cationHomeSubClusterStoreInputValidator.java |  183 +++
 ...ationMembershipStateStoreInputValidator.java |  317 +
 .../FederationPolicyStoreInputValidator.java|  144 ++
 ...derationStateStoreInvalidInputException.java |   48 +
 .../federation/store/utils/package-info.java|   17 +
 .../impl/FederationStateStoreBaseTest.java  |6 +-
 .../TestFederationStateStoreInputValidator.java | 1265 ++
 8 files changed, 2007 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6028f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
index 8144435..6e564dc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -57,6 +57,9 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegister
 import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationApplicationHomeSubClusterStoreInputValidator;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationMembershipStateStoreInputValidator;
+import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator;
 import org.apache.hadoop.yarn.server.records.Version;
 import org.apache.hadoop.yarn.util.MonotonicClock;
 
@@ -88,6 +91,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public SubClusterRegisterResponse registerSubCluster(
   SubClusterRegisterRequest request) throws YarnException {
+FederationMembershipStateStoreInputValidator
+.validateSubClusterRegisterRequest(request);
 SubClusterInfo subClusterInfo = request.getSubClusterInfo();
 membership.put(subClusterInfo.getSubClusterId(), subClusterInfo);
 return SubClusterRegisterResponse.newInstance();
@@ -96,6 +101,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public SubClusterDeregisterResponse deregisterSubCluster(
   SubClusterDeregisterRequest request) throws YarnException {
+FederationMembershipStateStoreInputValidator
+.validateSubClusterDeregisterRequest(request);
 SubClusterInfo subClusterInfo = membership.get(request.getSubClusterId());
 if (subClusterInfo == null) {
   throw new YarnException(
@@ -111,6 +118,8 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   public SubClusterHeartbeatResponse subClusterHeartbeat(
   SubClusterHeartbeatRequest request) throws YarnException {
 
+FederationMembershipStateStoreInputValidator
+.validateSubClusterHeartbeatRequest(request);
 SubClusterId subClusterId = request.getSubClusterId();
 SubClusterInfo subClusterInfo = membership.get(subClusterId);
 
@@ -129,6 +138,9 @@ public class MemoryFederationStateStore implements 
FederationStateStore {
   @Override
   public GetSubClusterInfoResponse getSubCluster(
   GetSubClusterInfoRequest request) throws YarnException {
+
+FederationMembershipStateStoreInputValidator
+

[21/50] [abbrv] hadoop git commit: YARN-6061. Addendum. Remove extraneous change.

2017-02-14 Thread subru
YARN-6061. Addendum. Remove extraneous change.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/353a9b2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/353a9b2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/353a9b2d

Branch: refs/heads/YARN-2915
Commit: 353a9b2d9165a221491395edbadf8acc3a39990b
Parents: 8e53f2b
Author: Karthik Kambatla 
Authored: Tue Feb 14 15:19:52 2017 -0800
Committer: Karthik Kambatla 
Committed: Tue Feb 14 15:19:52 2017 -0800

--
 .../scheduler/fair/policies/DominantResourceFairnessPolicy.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/353a9b2d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index 7a29735..ad41b11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
@@ -90,7 +90,7 @@ public class DominantResourceFairnessPolicy extends 
SchedulingPolicy {
 
   @Override
   public boolean checkIfUsageOverFairShare(Resource usage, Resource fairShare) 
{
-return Resources.greaterThan(CALCULATOR, null, usage, fairShare);
+return !Resources.fitsIn(usage, fairShare);
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/50] [abbrv] hadoop git commit: HDFS-11379. DFSInputStream may infinite loop requesting block locations. Contributed by Daryn Sharp. [Forced Update!]

2017-02-14 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2915 08dc09581 -> 8d9be8425 (forced update)


HDFS-11379. DFSInputStream may infinite loop requesting block locations. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07a5184f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07a5184f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07a5184f

Branch: refs/heads/YARN-2915
Commit: 07a5184f74fdeffc42cdaec42ad4378c0e41c541
Parents: 2b7a7bb
Author: Kihwal Lee 
Authored: Fri Feb 10 12:27:08 2017 -0600
Committer: Kihwal Lee 
Committed: Fri Feb 10 12:27:08 2017 -0600

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 48 --
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 51 
 2 files changed, 70 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07a5184f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 5783f90..39d0eed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -421,33 +421,36 @@ public class DFSInputStream extends FSInputStream
   }
   else {
 // search cached blocks first
-int targetBlockIdx = locatedBlocks.findBlock(offset);
-if (targetBlockIdx < 0) { // block is not cached
-  targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
-  // fetch more blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, 
offset);
-  assert (newBlocks != null) : "Could not find target position " + 
offset;
-  locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
-}
-blk = locatedBlocks.get(targetBlockIdx);
+blk = fetchBlockAt(offset, 0, true);
   }
   return blk;
 }
   }
 
   /** Fetch a block from namenode and cache it */
-  protected void fetchBlockAt(long offset) throws IOException {
+  protected LocatedBlock fetchBlockAt(long offset) throws IOException {
+return fetchBlockAt(offset, 0, false); // don't use cache
+  }
+
+  /** Fetch a block from namenode and cache it */
+  private LocatedBlock fetchBlockAt(long offset, long length, boolean useCache)
+  throws IOException {
 synchronized(infoLock) {
   int targetBlockIdx = locatedBlocks.findBlock(offset);
   if (targetBlockIdx < 0) { // block is not cached
 targetBlockIdx = LocatedBlocks.getInsertIndex(targetBlockIdx);
+useCache = false;
   }
-  // fetch blocks
-  final LocatedBlocks newBlocks = dfsClient.getLocatedBlocks(src, offset);
-  if (newBlocks == null) {
-throw new IOException("Could not find target position " + offset);
+  if (!useCache) { // fetch blocks
+final LocatedBlocks newBlocks = (length == 0)
+? dfsClient.getLocatedBlocks(src, offset)
+: dfsClient.getLocatedBlocks(src, offset, length);
+if (newBlocks == null || newBlocks.locatedBlockCount() == 0) {
+  throw new EOFException("Could not find target position " + offset);
+}
+locatedBlocks.insertRange(targetBlockIdx, 
newBlocks.getLocatedBlocks());
   }
-  locatedBlocks.insertRange(targetBlockIdx, newBlocks.getLocatedBlocks());
+  return locatedBlocks.get(targetBlockIdx);
 }
   }
 
@@ -502,28 +505,15 @@ public class DFSInputStream extends FSInputStream
   assert (locatedBlocks != null) : "locatedBlocks is null";
   List blockRange = new ArrayList<>();
   // search cached blocks first
-  int blockIdx = locatedBlocks.findBlock(offset);
-  if (blockIdx < 0) { // block is not cached
-blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
-  }
   long remaining = length;
   long curOff = offset;
   while(remaining > 0) {
-LocatedBlock blk = null;
-if(blockIdx < locatedBlocks.locatedBlockCount())
-  blk = locatedBlocks.get(blockIdx);
-if (blk == null || curOff < blk.getStartOffset()) {
-  LocatedBlocks newBlocks;
-  newBlocks = dfsClient.getLocatedBlocks(src, curOff, remaining);
-  locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
-  continue;
-}
+LocatedBlock blk = fetchBlockAt(curOff, remaining, true);
 assert curOff 

[19/50] [abbrv] hadoop git commit: YARN-6061. Add an UncaughtExceptionHandler for critical threads in RM. (Yufei Gu via kasha)

2017-02-14 Thread subru
YARN-6061. Add an UncaughtExceptionHandler for critical threads in RM. (Yufei 
Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/652679aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/652679aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/652679aa

Branch: refs/heads/YARN-2915
Commit: 652679aa8ad6f9e61b8ed8e2b04b3e0332025e94
Parents: aaf2713
Author: Karthik Kambatla 
Authored: Tue Feb 14 13:39:34 2017 -0800
Committer: Karthik Kambatla 
Committed: Tue Feb 14 13:39:41 2017 -0800

--
 .../hadoop/yarn/client/TestRMFailover.java  | 100 ++-
 .../yarn/server/resourcemanager/RMContext.java  |   2 +
 .../server/resourcemanager/RMContextImpl.java   |  10 ++
 ...MCriticalThreadUncaughtExceptionHandler.java |  58 +++
 .../resourcemanager/RMFatalEventType.java   |   5 +-
 .../server/resourcemanager/ResourceManager.java |  65 +---
 .../resourcemanager/recovery/RMStateStore.java  |  13 +--
 .../DominantResourceFairnessPolicy.java |   2 +-
 8 files changed, 226 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/652679aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index b58a775..4bf6a78 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -22,7 +22,10 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
@@ -37,14 +40,18 @@ import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
 import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.RMCriticalThreadUncaughtExceptionHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.junit.After;
@@ -174,7 +181,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 // so it transitions to standby.
 ResourceManager rm = cluster.getResourceManager(
 cluster.getActiveRMIndex());
-rm.handleTransitionToStandBy();
+rm.handleTransitionToStandByInNewThread();
 int maxWaitingAttempts = 2000;
 while (maxWaitingAttempts-- > 0 ) {
   if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
@@ -349,4 +356,95 @@ public class TestRMFailover extends ClientBaseWithFixes {
 }
 return redirectUrl;
   }
+
+  /**
+   * Throw {@link RuntimeException} inside a thread of
+   * {@link ResourceManager} with HA enabled and check if the
+   * {@link ResourceManager} is transited to standby state.
+   *
+   * @throws InterruptedException if any
+   */
+  @Test
+  public void testUncaughtExceptionHandlerWithHAEnabled()
+  throws InterruptedException {
+conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
+cluster.init(conf);
+cluster.start();
+assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
+
+ResourceManager resourceManager = cluster.getResourceManager(
+cluster.getActiveRMIndex());
+
+final RMCriticalThreadUncaughtExceptionHandler exHandler =
+new 

[44/50] [abbrv] hadoop git commit: YARN-5467. InputValidator for the FederationStateStore internal APIs. (Giovanni Matteo Fumarola via Subru)

2017-02-14 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6028f167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
new file mode 100644
index 000..13175ae
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/utils/TestFederationStateStoreInputValidator.java
@@ -0,0 +1,1265 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.utils;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+import 
org.apache.hadoop.yarn.server.federation.store.records.UpdateApplicationHomeSubClusterRequest;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Unit tests for FederationApplicationInputValidator,
+ * FederationMembershipInputValidator, and FederationPolicyInputValidator.
+ */
+public class TestFederationStateStoreInputValidator {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestFederationStateStoreInputValidator.class);
+
+  private static SubClusterId subClusterId;
+  private static String amRMServiceAddress;
+  private static String clientRMServiceAddress;
+  private static String rmAdminServiceAddress;
+  private static String rmWebServiceAddress;
+  private static int lastHeartBeat;
+  private static SubClusterState stateNew;
+  private static SubClusterState stateLost;
+  private static ApplicationId appId;
+  private static int lastStartTime;
+  private static String capability;
+  private static String queue;
+  private static String type;
+  private static ByteBuffer params;
+
+  private static SubClusterId subClusterIdInvalid;
+  private static SubClusterId subClusterIdNull;
+
+  private static int lastHeartBeatNegative;
+  private static int lastStartTimeNegative;
+
+  private static SubClusterState stateNull;
+  private static ApplicationId appIdNull;
+
+  private static String capabilityNull;
+  private static String capabilityEmpty;
+
+  private static String addressNull;
+  private static String addressEmpty;
+  private static String addressWrong;
+  private static String addressWrongPort;
+
+  private static String queueEmpty;
+  private static String 

[11/50] [abbrv] hadoop git commit: HADOOP-13929. ADLS connector should not check in contract-test-options.xml. (John Zhuge via lei)

2017-02-14 Thread subru
HADOOP-13929. ADLS connector should not check in contract-test-options.xml. 
(John Zhuge via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71c23c9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71c23c9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71c23c9f

Branch: refs/heads/YARN-2915
Commit: 71c23c9fc94cfdf58de80effbc3f51c0925d0cfe
Parents: 4ed33e9
Author: Lei Xu 
Authored: Mon Feb 13 13:33:13 2017 -0800
Committer: Lei Xu 
Committed: Mon Feb 13 13:33:13 2017 -0800

--
 .gitignore  | 12 ++--
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java |  2 +-
 .../src/site/markdown/index.md  |  4 +-
 .../fs/adl/live/AdlStorageConfiguration.java| 42 +++---
 .../src/test/resources/adls.xml | 11 
 .../test/resources/contract-test-options.xml| 61 
 6 files changed, 39 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c23c9f/.gitignore
--
diff --git a/.gitignore b/.gitignore
index eb98a3f..724162d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,10 @@ target
 build
 dependency-reduced-pom.xml
 
+# Filesystem contract test options and credentials
+auth-keys.xml
+azure-auth-keys.xml
+
 # External tool builders
 */.externalToolBuilders
 */maven-eclipse.xml
@@ -24,8 +28,6 @@ dependency-reduced-pom.xml
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
-hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tmp
@@ -41,10 +43,4 @@ 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp
 yarnregistry.pdf
-hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
-hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/
-hadoop-tools/hadoop-aliyun/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aliyun/src/test/resources/contract-test-options.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c23c9f/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
index 3d41025..303b7bc 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlFileSystem.java
@@ -72,7 +72,7 @@ import static org.apache.hadoop.fs.adl.AdlConfKeys.*;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class AdlFileSystem extends FileSystem {
-  static final String SCHEME = "adl";
+  public static final String SCHEME = "adl";
   static final int DEFAULT_PORT = 443;
   private URI uri;
   private String userName;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c23c9f/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
--
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md 
b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index ced5cff..5037db6 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -224,7 +224,9 @@ commands demonstrate access to a storage account named 
`youraccount`.
 ## Testing the 
azure-datalake-store Module
 The hadoop-azure module includes a full suite of unit tests. Most of the tests 
will run without additional configuration by running mvn test. This includes 
tests against mocked storage, which is an in-memory emulation of Azure Data 
Lake Storage.
 
-A selection of tests can run against the Azure Data Lake Storage. To run tests 
against Adl storage. Please configure contract-test-options.xml with Adl 
account information mentioned in the above sections. Also turn on contract test 
execution flag to 

[27/50] [abbrv] hadoop git commit: YARN-3673. Create a FailoverProxy for Federation services. Contributed by Subru Krishnan

2017-02-14 Thread subru
YARN-3673. Create a FailoverProxy for Federation services. Contributed by Subru 
Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e637a07e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e637a07e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e637a07e

Branch: refs/heads/YARN-2915
Commit: e637a07e1514ab1e6935a6a7194a8a883db54138
Parents: 6028f16
Author: Jian He 
Authored: Mon Aug 22 14:43:07 2016 +0800
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../org/apache/hadoop/yarn/conf/HAUtil.java |  30 ++-
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../TestFederationRMFailoverProxyProvider.java  | 154 ++
 .../hadoop/yarn/client/ClientRMProxy.java   |   4 +-
 .../org/apache/hadoop/yarn/client/RMProxy.java  |  23 +-
 .../src/main/resources/yarn-default.xml |   7 +
 .../hadoop-yarn-server-common/pom.xml   |   2 -
 .../hadoop/yarn/server/api/ServerRMProxy.java   |   4 +-
 .../failover/FederationProxyProviderUtil.java   | 163 ++
 .../FederationRMFailoverProxyProvider.java  | 211 +++
 .../federation/failover/package-info.java   |  17 ++
 12 files changed, 613 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e637a07e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
index e4948e7..942b08a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.yarn.conf;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -27,8 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
-import java.net.InetSocketAddress;
-import java.util.Collection;
+import com.google.common.annotations.VisibleForTesting;
 
 @InterfaceAudience.Private
 public class HAUtil {
@@ -44,6 +45,29 @@ public class HAUtil {
   }
 
   /**
+   * Returns true if Federation is configured.
+   *
+   * @param conf Configuration
+   * @return true if federation is configured in the configuration; else false.
+   */
+  public static boolean isFederationEnabled(Configuration conf) {
+return conf.getBoolean(YarnConfiguration.FEDERATION_ENABLED,
+YarnConfiguration.DEFAULT_FEDERATION_ENABLED);
+  }
+
+  /**
+   * Returns true if RM failover is enabled in a Federation setting.
+   *
+   * @param conf Configuration
+   * @return if RM failover is enabled in conjunction with Federation in the
+   * configuration; else false.
+   */
+  public static boolean isFederationFailoverEnabled(Configuration conf) {
+return conf.getBoolean(YarnConfiguration.FEDERATION_FAILOVER_ENABLED,
+YarnConfiguration.DEFAULT_FEDERATION_FAILOVER_ENABLED);
+  }
+
+  /**
* Returns true if Resource Manager HA is configured.
*
* @param conf Configuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e637a07e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 72b0d26..a2c42fd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2487,6 +2487,16 @@ public class YarnConfiguration extends Configuration {
 
   public static final String FEDERATION_PREFIX = YARN_PREFIX + "federation.";
 
+  public static final String FEDERATION_ENABLED = FEDERATION_PREFIX + 
"enabled";
+  public static final boolean DEFAULT_FEDERATION_ENABLED = false;
+
+  public 

[18/50] [abbrv] hadoop git commit: HDFS-11409. DatanodeInfo getNetworkLocation and setNetworkLocation should use volatile instead of synchronized. Contributed by Chen Liang.

2017-02-14 Thread subru
HDFS-11409. DatanodeInfo getNetworkLocation and setNetworkLocation should use 
volatile instead of synchronized. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aaf27132
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aaf27132
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aaf27132

Branch: refs/heads/YARN-2915
Commit: aaf27132350547fcde1fdb372f19626838f44bc4
Parents: 0cf5993
Author: Xiaoyu Yao 
Authored: Tue Feb 14 12:52:34 2017 -0800
Committer: Xiaoyu Yao 
Committed: Tue Feb 14 12:52:34 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf27132/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 41735b1..acbcffa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -51,7 +51,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private long lastUpdate;
   private long lastUpdateMonotonic;
   private int xceiverCount;
-  private String location = NetworkTopology.DEFAULT_RACK;
+  private volatile String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
   private List dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
@@ -293,11 +293,11 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 
   /** network location */
   @Override
-  public synchronized String getNetworkLocation() {return location;}
+  public String getNetworkLocation() {return location;}
 
   /** Sets the network location */
   @Override
-  public synchronized void setNetworkLocation(String location) {
+  public void setNetworkLocation(String location) {
 this.location = NodeBase.normalize(location);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] [abbrv] hadoop git commit: YARN-5323. Policies APIs for Federation Router and AMRMProxy policies. (Carlo Curino via Subru).

2017-02-14 Thread subru
YARN-5323. Policies APIs for Federation Router and AMRMProxy policies. (Carlo 
Curino via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/288c55e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/288c55e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/288c55e7

Branch: refs/heads/YARN-2915
Commit: 288c55e7cf0a9245bf6259b31ebd28126b12c29d
Parents: ed985a3
Author: Subru Krishnan 
Authored: Wed Sep 7 17:33:34 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../policies/ConfigurableFederationPolicy.java  |  44 +++
 .../policies/FederationPolicyConfigurator.java  |  91 +
 .../FederationPolicyInitializationContext.java  | 109 
 ...ionPolicyInitializationContextValidator.java |  82 
 .../policies/FederationPolicyWriter.java|  45 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  66 ++
 .../policies/amrmproxy/package-info.java|  20 +++
 .../exceptions/FederationPolicyException.java   |  33 +
 ...FederationPolicyInitializationException.java |  33 +
 .../NoActiveSubclustersException.java   |  27 
 .../exceptions/UnknownSubclusterException.java  |  28 
 .../policies/exceptions/package-info.java   |  20 +++
 .../federation/policies/package-info.java   |  20 +++
 .../policies/router/FederationRouterPolicy.java |  45 +++
 .../policies/router/package-info.java   |  20 +++
 ...ionPolicyInitializationContextValidator.java | 128 +++
 .../utils/FederationPoliciesTestUtil.java   |  83 
 17 files changed, 894 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/288c55e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
new file mode 100644
index 000..fd6ceea
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+
+/**
+ * This interface provides a general method to reinitialize a policy. The
+ * semantics are try-n-swap, so in case an exception is thrown the
+ * implementation must ensure the previous state and configuration are preserved.
+ */
+public interface ConfigurableFederationPolicy {
+
+  /**
+   * This method is invoked to initialize or update the configuration of
+   * policies. The implementor should provide try-n-swap semantics, and retain
+   * state if possible.
+   *
+   * @param federationPolicyInitializationContext the new context to provide to
+   *  implementor.
+   *
+   * @throws FederationPolicyInitializationException in case the initialization
+   * fails.
+   */
+  void reinitialize(
+  FederationPolicyInitializationContext
+  federationPolicyInitializationContext)
+  throws FederationPolicyInitializationException;
+}


[29/50] [abbrv] hadoop git commit: YARN-3662. Federation Membership State Store internal APIs.

2017-02-14 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00b32dff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
new file mode 100644
index 000..d4c5451
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterDeregisterRequestPBImpl.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.records.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterDeregisterRequestProtoOrBuilder;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterIdProto;
+import 
org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.SubClusterStateProto;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Protocol buffer based implementation of {@link SubClusterDeregisterRequest}.
+ */
+@Private
+@Unstable
+public class SubClusterDeregisterRequestPBImpl
+extends SubClusterDeregisterRequest {
+
+  private SubClusterDeregisterRequestProto proto =
+  SubClusterDeregisterRequestProto.getDefaultInstance();
+  private SubClusterDeregisterRequestProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  public SubClusterDeregisterRequestPBImpl() {
+builder = SubClusterDeregisterRequestProto.newBuilder();
+  }
+
+  public SubClusterDeregisterRequestPBImpl(
+  SubClusterDeregisterRequestProto proto) {
+this.proto = proto;
+viaProto = true;
+  }
+
+  public SubClusterDeregisterRequestProto getProto() {
+mergeLocalToProto();
+proto = viaProto ? proto : builder.build();
+viaProto = true;
+return proto;
+  }
+
+  private void mergeLocalToProto() {
+if (viaProto) {
+  maybeInitBuilder();
+}
+mergeLocalToBuilder();
+proto = builder.build();
+viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+if (viaProto || builder == null) {
+  builder = SubClusterDeregisterRequestProto.newBuilder(proto);
+}
+viaProto = false;
+  }
+
+  private void mergeLocalToBuilder() {
+  }
+
+  @Override
+  public int hashCode() {
+return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+if (other == null) {
+  return false;
+}
+if (other.getClass().isAssignableFrom(this.getClass())) {
+  return this.getProto().equals(this.getClass().cast(other).getProto());
+}
+return false;
+  }
+
+  @Override
+  public String toString() {
+return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public SubClusterId getSubClusterId() {
+SubClusterDeregisterRequestProtoOrBuilder p = viaProto ? proto : builder;
+if (!p.hasSubClusterId()) {
+  return null;
+}
+return convertFromProtoFormat(p.getSubClusterId());
+  }
+
+  @Override
+  public void setSubClusterId(SubClusterId subClusterId) {
+maybeInitBuilder();
+if (subClusterId == null) {
+  builder.clearSubClusterId();
+  return;
+}
+

[24/50] [abbrv] hadoop git commit: YARN-5324. Stateless Federation router policies implementation. (Carlo Curino via Subru).

2017-02-14 Thread subru
YARN-5324. Stateless Federation router policies implementation. (Carlo Curino 
via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cf5730c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cf5730c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cf5730c

Branch: refs/heads/YARN-2915
Commit: 6cf5730c1b40f87a78050ef71b4b9129e6b2963b
Parents: 288c55e
Author: Subru Krishnan 
Authored: Thu Sep 22 17:06:57 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../policies/FederationPolicyConfigurator.java  |  91 ---
 .../FederationPolicyInitializationContext.java  |  11 +-
 .../policies/FederationPolicyManager.java   | 126 +
 .../policies/FederationPolicyWriter.java|  45 
 .../policies/dao/WeightedPolicyInfo.java| 253 +++
 .../federation/policies/dao/package-info.java   |  20 ++
 .../router/BaseWeightedRouterPolicy.java| 150 +++
 .../policies/router/LoadBasedRouterPolicy.java  | 109 
 .../policies/router/PriorityRouterPolicy.java   |  66 +
 .../router/UniformRandomRouterPolicy.java   |  85 +++
 .../router/WeightedRandomRouterPolicy.java  |  79 ++
 .../store/records/SubClusterIdInfo.java |  75 ++
 .../policies/BaseFederationPoliciesTest.java| 155 
 ...ionPolicyInitializationContextValidator.java |  17 +-
 .../router/TestLoadBasedRouterPolicy.java   | 109 
 .../router/TestPriorityRouterPolicy.java|  87 +++
 .../router/TestUniformRandomRouterPolicy.java   |  65 +
 .../router/TestWeightedRandomRouterPolicy.java  | 127 ++
 .../utils/FederationPoliciesTestUtil.java   |  82 +-
 19 files changed, 1604 insertions(+), 148 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf5730c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
deleted file mode 100644
index fdc3857..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyConfigurator.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.federation.policies;
-
-import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
-
-
-import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
-
-import org.apache.hadoop.yarn.server.federation.policies.router
-.FederationRouterPolicy;
-
-/**
- * Implementors of this interface are capable of instantiating and (re)initializing
- * {@link FederationAMRMProxyPolicy} and {@link FederationRouterPolicy} based 
on
- * a {@link FederationPolicyInitializationContext}. The reason to bind these 
two
- * policies together is to make sure we remain consistent across the router and
- * amrmproxy policy decisions.
- */
-public interface FederationPolicyConfigurator {
-
-  /**
-   * If the current instance is compatible, this method returns the same
-   * instance of {@link FederationAMRMProxyPolicy} reinitialized with the
-   * current context, otherwise a new instance initialized with the current
-   * context is provided. If the instance is compatible with the current class
-   * the implementors should attempt to reinitialize (retaining state). To 
effect
-   * a complete policy reset 

[31/50] [abbrv] hadoop git commit: YARN-5612. Return SubClusterId in FederationStateStoreFacade#addApplicationHomeSubCluster for Router Failover. (Giovanni Matteo Fumarola via Subru).

2017-02-14 Thread subru
YARN-5612. Return SubClusterId in 
FederationStateStoreFacade#addApplicationHomeSubCluster for Router Failover. 
(Giovanni Matteo Fumarola via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2994dff4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2994dff4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2994dff4

Branch: refs/heads/YARN-2915
Commit: 2994dff465922d30ea88f08a7821eff047967707
Parents: 5ef27f1
Author: Subru Krishnan 
Authored: Thu Sep 1 13:55:54 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../utils/FederationStateStoreFacade.java   | 11 ---
 .../utils/TestFederationStateStoreFacade.java   | 30 
 2 files changed, 37 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2994dff4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index f1c8218..66a0b60 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.GetApplicationHomeSubClusterResponse;
@@ -298,13 +299,15 @@ public final class FederationStateStoreFacade {
*
* @param appHomeSubCluster the mapping of the application to its home
*  sub-cluster
+   * @return the stored Subcluster from StateStore
* @throws YarnException if the call to the state store is unsuccessful
*/
-  public void addApplicationHomeSubCluster(
+  public SubClusterId addApplicationHomeSubCluster(
   ApplicationHomeSubCluster appHomeSubCluster) throws YarnException {
-stateStore.addApplicationHomeSubCluster(
-AddApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
-return;
+AddApplicationHomeSubClusterResponse response =
+stateStore.addApplicationHomeSubCluster(
+
AddApplicationHomeSubClusterRequest.newInstance(appHomeSubCluster));
+return response.getHomeSubCluster();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2994dff4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
index 53f4f84..d46bef0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacade.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore;
+import 

[43/50] [abbrv] hadoop git commit: YARN-5601. Make the RM epoch base value configurable. Contributed by Subru Krishnan

2017-02-14 Thread subru
YARN-5601. Make the RM epoch base value configurable. Contributed by Subru 
Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed985a3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed985a3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed985a3d

Branch: refs/heads/YARN-2915
Commit: ed985a3dd5e61aff57312447b6810163c3972ffe
Parents: 2994dff
Author: Jian He 
Authored: Fri Sep 2 12:23:57 2016 +0800
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml | 5 -
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java  | 3 +++
 .../apache/hadoop/yarn/conf/TestYarnConfigurationFields.java | 2 ++
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java  | 7 +++
 .../resourcemanager/recovery/FileSystemRMStateStore.java | 2 +-
 .../server/resourcemanager/recovery/LeveldbRMStateStore.java | 2 +-
 .../server/resourcemanager/recovery/MemoryRMStateStore.java  | 1 +
 .../yarn/server/resourcemanager/recovery/RMStateStore.java   | 4 
 .../yarn/server/resourcemanager/recovery/ZKRMStateStore.java | 2 +-
 .../resourcemanager/recovery/RMStateStoreTestBase.java   | 8 +---
 .../server/resourcemanager/recovery/TestFSRMStateStore.java  | 1 +
 .../resourcemanager/recovery/TestLeveldbRMStateStore.java| 1 +
 .../server/resourcemanager/recovery/TestZKRMStateStore.java  | 1 +
 13 files changed, 32 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed985a3d/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 2f5451d..bbd03a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -293,7 +293,10 @@
   
   
 
-
+
+  
+  
+
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed985a3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f3062e2..58b2f44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -142,6 +142,9 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_HOSTNAME = RM_PREFIX + "hostname";
 
+  public static final String RM_EPOCH = RM_PREFIX + "epoch";
+  public static final long DEFAULT_RM_EPOCH = 0L;
+
   /** The address of the applications manager interface in the RM.*/
   public static final String RM_ADDRESS = 
 RM_PREFIX + "address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed985a3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 5e0876f..3f3a06c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -75,6 +75,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.RM_EPOCH);
 
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed985a3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java

[38/50] [abbrv] hadoop git commit: YARN-5872. Add AlwaysReject policies for router and amrmproxy. (Carlo Curino via Subru).

2017-02-14 Thread subru
YARN-5872. Add AlwaysReject policies for router and amrmproxy. (Carlo Curino via 
Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d9be842
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d9be842
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d9be842

Branch: refs/heads/YARN-2915
Commit: 8d9be8425ef44bb6bbdccd87a7d08fff4e455857
Parents: a34d8eb
Author: Subru Krishnan 
Authored: Tue Nov 22 18:37:30 2016 -0800
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../amrmproxy/RejectAMRMProxyPolicy.java| 67 +
 .../manager/RejectAllPolicyManager.java | 40 ++
 .../policies/router/RejectRouterPolicy.java | 66 +
 .../amrmproxy/TestRejectAMRMProxyPolicy.java| 78 
 .../manager/TestRejectAllPolicyManager.java | 40 ++
 .../policies/router/TestRejectRouterPolicy.java | 63 
 6 files changed, 354 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d9be842/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
new file mode 100644
index 000..3783df6
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+
+/**
+ * An implementation of the {@link FederationAMRMProxyPolicy} that simply
+ * rejects all requests. Useful to prevent apps from accessing any sub-cluster.
+ */
+public class RejectAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
+
+  private Set knownClusterIds = new HashSet<>();
+
+  @Override
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
+  throws FederationPolicyInitializationException {
+// overrides initialize to avoid weight checks that do not apply for
+// this policy.
+FederationPolicyInitializationContextValidator.validate(policyContext,
+this.getClass().getCanonicalName());
+setPolicyContext(policyContext);
+  }
+
+  @Override
+  public Map splitResourceRequests(
+  List resourceRequests) throws YarnException {
+throw new FederationPolicyException("The policy configured for this queue "
++ "rejects all routing requests by construction.");
+  }
+
+  @Override
+  public void notifyOfResponse(SubClusterId subClusterId,
+  AllocateResponse response) throws YarnException {
+// This might be invoked for applications started with 

[35/50] [abbrv] hadoop git commit: YARN-5905. Update the RM webapp host that is reported as part of Federation membership to current primary RM's IP.

2017-02-14 Thread subru
YARN-5905. Update the RM webapp host that is reported as part of Federation 
membership to current primary RM's IP.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a34d8ebf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a34d8ebf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a34d8ebf

Branch: refs/heads/YARN-2915
Commit: a34d8ebf73be2bb9f9a55f674027510fe40da691
Parents: fc338ed
Author: Subru Krishnan 
Authored: Tue Nov 22 18:30:40 2016 -0800
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../federation/FederationStateStoreService.java  |  4 ++--
 .../federation/TestFederationRMStateStoreService.java| 11 ++-
 2 files changed, 12 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a34d8ebf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
index 9a01d7e..530184f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/federation/FederationStateStoreService.java
@@ -177,8 +177,8 @@ public class FederationStateStoreService extends 
AbstractService
 config.getSocketAddr(YarnConfiguration.RM_ADMIN_ADDRESS,
 YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
 YarnConfiguration.DEFAULT_RM_ADMIN_PORT));
-String webAppAddress =
-WebAppUtils.getResolvedRemoteRMWebAppURLWithoutScheme(config);
+String webAppAddress = getServiceAddress(NetUtils
+.createSocketAddr(WebAppUtils.getRMWebAppURLWithScheme(config)));
 
 SubClusterInfo subClusterInfo = SubClusterInfo.newInstance(subClusterId,
 amRMAddress, clientRMAddress, rmAdminAddress, webAppAddress,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a34d8ebf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
index 30f69b5..d92a793 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/federation/TestFederationRMStateStoreService.java
@@ -19,6 +19,7 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.federation;
 
 import java.io.IOException;
 import java.io.StringReader;
+import java.net.UnknownHostException;
 
 import javax.xml.bind.JAXBException;
 
@@ -157,12 +158,20 @@ public class TestFederationRMStateStoreService {
   }
 
   private String checkSubClusterInfo(SubClusterState state)
-  throws YarnException {
+  throws YarnException, UnknownHostException {
 Assert.assertNotNull(stateStore.getSubCluster(request));
 SubClusterInfo response =
 stateStore.getSubCluster(request).getSubClusterInfo();
 Assert.assertEquals(state, response.getState());
 Assert.assertTrue(response.getLastHeartBeat() >= lastHearbeatTS);
+String expectedAddress =
+(response.getClientRMServiceAddress().split(":"))[0];
+Assert.assertEquals(expectedAddress,
+(response.getAMRMServiceAddress().split(":"))[0]);
+Assert.assertEquals(expectedAddress,
+(response.getRMAdminServiceAddress().split(":"))[0]);
+

[50/50] [abbrv] hadoop git commit: YARN-5307. Federation Application State Store internal APIs

2017-02-14 Thread subru
YARN-5307. Federation Application State Store internal APIs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c503284
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c503284
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c503284

Branch: refs/heads/YARN-2915
Commit: 4c50328448666fb1192b0af4447dcd587d541425
Parents: 9e5e94b
Author: Subru Krishnan 
Authored: Fri Aug 5 11:52:44 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 ...ederationApplicationHomeSubClusterStore.java | 126 
 .../AddApplicationHomeSubClusterRequest.java|  72 +++
 .../AddApplicationHomeSubClusterResponse.java   |  44 +
 .../records/ApplicationHomeSubCluster.java  | 124 
 .../DeleteApplicationHomeSubClusterRequest.java |  65 +++
 ...DeleteApplicationHomeSubClusterResponse.java |  43 +
 .../GetApplicationHomeSubClusterRequest.java|  64 +++
 .../GetApplicationHomeSubClusterResponse.java   |  73 +++
 .../GetApplicationsHomeSubClusterRequest.java   |  40 
 .../GetApplicationsHomeSubClusterResponse.java  |  75 
 .../UpdateApplicationHomeSubClusterRequest.java |  74 
 ...UpdateApplicationHomeSubClusterResponse.java |  43 +
 ...dApplicationHomeSubClusterRequestPBImpl.java | 132 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 .../pb/ApplicationHomeSubClusterPBImpl.java | 167 
 ...eApplicationHomeSubClusterRequestPBImpl.java | 130 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 ...tApplicationHomeSubClusterRequestPBImpl.java | 135 +
 ...ApplicationHomeSubClusterResponsePBImpl.java | 132 +
 ...ApplicationsHomeSubClusterRequestPBImpl.java |  78 
 ...pplicationsHomeSubClusterResponsePBImpl.java | 190 +++
 .../pb/GetSubClustersInfoResponsePBImpl.java|   6 +-
 ...eApplicationHomeSubClusterRequestPBImpl.java | 132 +
 ...ApplicationHomeSubClusterResponsePBImpl.java |  78 
 .../proto/yarn_server_federation_protos.proto   |  45 -
 .../records/TestFederationProtocolRecords.java  |  81 
 26 files changed, 2301 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c503284/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
new file mode 100644
index 000..217ee2e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationApplicationHomeSubClusterStore.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.DeleteApplicationHomeSubClusterResponse;
+import 

[36/50] [abbrv] hadoop git commit: YARN-3672. Create Facade for Federation State and Policy Store. Contributed by Subru Krishnan

2017-02-14 Thread subru
YARN-3672. Create Facade for Federation State and Policy Store. Contributed by 
Subru Krishnan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ea752f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ea752f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ea752f2

Branch: refs/heads/YARN-2915
Commit: 9ea752f237c30288d7ef397f29ff43244698b511
Parents: 0b0386e
Author: Jian He 
Authored: Wed Aug 17 11:13:19 2016 +0800
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 hadoop-project/pom.xml  |  13 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../src/main/resources/yarn-default.xml |  20 +-
 .../hadoop-yarn-server-common/pom.xml   |  10 +
 .../utils/FederationStateStoreFacade.java   | 532 +++
 .../server/federation/utils/package-info.java   |  17 +
 .../utils/FederationStateStoreTestUtil.java | 149 ++
 .../utils/TestFederationStateStoreFacade.java   | 148 ++
 9 files changed, 905 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ea752f2/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 606f7fc..080c812 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -93,6 +93,9 @@
 2.0.0-M21
 1.0.0-M33
 
+1.0.0
+3.0.3
+
 
 1.8
 
@@ -1245,6 +1248,16 @@
   kerb-simplekdc
   1.0.0-RC2
 
+
+  javax.cache
+  cache-api
+  ${jcache.version}
+
+
+  org.ehcache
+  ehcache
+  ${ehcache.version}
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ea752f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a047450..72b0d26 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2486,6 +2486,19 @@ public class YarnConfiguration extends Configuration {
   
 
   public static final String FEDERATION_PREFIX = YARN_PREFIX + "federation.";
+
+  public static final String FEDERATION_STATESTORE_CLIENT_CLASS =
+  FEDERATION_PREFIX + "state-store.class";
+
+  public static final String DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS =
+  
"org.apache.hadoop.yarn.server.federation.store.impl.MemoryFederationStateStore";
+
+  public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS =
+  FEDERATION_PREFIX + "cache-ttl.secs";
+
+  // 5 minutes
+  public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
+
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ea752f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 3da4bab..bfc2534 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -68,6 +68,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
 
configurationPropsToSkipCompare.add(YarnConfiguration.CURATOR_LEADER_ELECTOR);
 
+// Federation default configs to be ignored
+configurationPropsToSkipCompare
+.add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS);
+
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"
 configurationPropsToSkipCompare.add(YarnConfiguration.


[23/50] [abbrv] hadoop git commit: YARN-5324. Stateless Federation router policies implementation. (Carlo Curino via Subru).

2017-02-14 Thread subru
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf5730c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
index 8c2115b..f901329 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/FederationPoliciesTestUtil.java
@@ -19,13 +19,20 @@ package org.apache.hadoop.yarn.server.federation.utils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.ConfigurableFederationPolicy;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl;
 import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
-import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import org.apache.hadoop.yarn.server.federation.store.records.*;
 
 import java.net.URL;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
@@ -41,6 +48,41 @@ public final class FederationPoliciesTestUtil {
 // disabled.
   }
 
+
+  public static void initializePolicyContext(
+  FederationPolicyInitializationContext fpc, ConfigurableFederationPolicy
+  policy, WeightedPolicyInfo policyInfo,
+  Map activeSubclusters)
+  throws YarnException {
+ByteBuffer buf = policyInfo.toByteBuffer();
+fpc.setSubClusterPolicyConfiguration(SubClusterPolicyConfiguration
+.newInstance("queue1", policy.getClass().getCanonicalName(), buf));
+FederationStateStoreFacade facade = FederationStateStoreFacade
+.getInstance();
+FederationStateStore fss = mock(FederationStateStore.class);
+
+if (activeSubclusters == null) {
+  activeSubclusters = new HashMap();
+}
+GetSubClustersInfoResponse response = GetSubClustersInfoResponse
+.newInstance(new 
ArrayList(activeSubclusters.values()));
+
+when(fss.getSubClusters(any())).thenReturn(response);
+facade.reinitialize(fss, new Configuration());
+fpc.setFederationStateStoreFacade(facade);
+policy.reinitialize(fpc);
+  }
+
+  public static void initializePolicyContext(
+  ConfigurableFederationPolicy policy,
+  WeightedPolicyInfo policyInfo, Map activeSubclusters) throws YarnException {
+FederationPolicyInitializationContext context =
+new FederationPolicyInitializationContext(null, initResolver(),
+initFacade());
+initializePolicyContext(context, policy, policyInfo, activeSubclusters);
+  }
+
   /**
* Initialize a {@link SubClusterResolver}.
*
@@ -66,18 +108,52 @@ public final class FederationPoliciesTestUtil {
   * Initialize a main-memory {@link FederationStateStoreFacade} used for
   * testing, with a mock resolver.
*
+   * @param subClusterInfos the list of subclusters to be served on
+   *getSubClusters invocations.
+   *
* @return the facade.
*
* @throws YarnException in case the initialization is not successful.
*/
-  public static FederationStateStoreFacade initFacade() throws YarnException {
+
+  public static FederationStateStoreFacade initFacade(
+  List subClusterInfos, SubClusterPolicyConfiguration
+  policyConfiguration) throws YarnException {
 FederationStateStoreFacade goodFacade = FederationStateStoreFacade
 .getInstance();
 FederationStateStore fss = mock(FederationStateStore.class);
 GetSubClustersInfoResponse response = GetSubClustersInfoResponse
-.newInstance(new ArrayList<>());
+.newInstance(subClusterInfos);
 when(fss.getSubClusters(any())).thenReturn(response);
+
+List configurations = new ArrayList<>();
+

[42/50] [abbrv] hadoop git commit: YARN-5406. In-memory based implementation of the FederationMembershipStateStore. Contributed by Ellen Hui.

2017-02-14 Thread subru
YARN-5406. In-memory based implementation of the 
FederationMembershipStateStore. Contributed by Ellen Hui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbb8884f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbb8884f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbb8884f

Branch: refs/heads/YARN-2915
Commit: bbb8884fac8629d1ddbd3e99820692aca0581fd0
Parents: 00b32df
Author: Subru Krishnan 
Authored: Thu Aug 4 15:54:38 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../store/impl/MemoryFederationStateStore.java  | 138 
 .../federation/store/impl/package-info.java |  17 ++
 .../records/GetSubClustersInfoRequest.java  |   4 +
 .../store/records/SubClusterState.java  |   4 +
 .../impl/FederationStateStoreBaseTest.java  | 221 +++
 .../impl/TestMemoryFederationStateStore.java|  49 
 6 files changed, 433 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbb8884f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
new file mode 100644
index 000..7fdc4a9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/MemoryFederationStateStore.java
@@ -0,0 +1,138 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import 
org.apache.hadoop.yarn.server.federation.store.FederationMembershipStateStore;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterDeregisterResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClustersInfoResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterHeartbeatResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterRegisterResponse;
+import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.util.MonotonicClock;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * In-memory implementation of FederationMembershipStateStore.
+ */
+public class MemoryFederationStateStore
+implements FederationMembershipStateStore {
+
+  private final Map membership =
+  new ConcurrentHashMap();
+  private final MonotonicClock clock = new MonotonicClock();
+
+  @Override
+  public Version getMembershipStateStoreVersion() {
+return null;
+  }
+
+  @Override
+  public 

[34/50] [abbrv] hadoop git commit: YARN-3664. Federation PolicyStore internal APIs

2017-02-14 Thread subru
YARN-3664. Federation PolicyStore internal APIs


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edbcd9bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edbcd9bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edbcd9bd

Branch: refs/heads/YARN-2915
Commit: edbcd9bd40e0593dabee3df0ac4d2f75a605f064
Parents: 4c50328
Author: Subru Krishnan 
Authored: Fri Aug 5 12:34:58 2016 -0700
Committer: Subru Krishnan 
Committed: Tue Feb 14 16:01:14 2017 -0800

--
 .../federation/store/FederationPolicyStore.java |  76 
 ...SubClusterPoliciesConfigurationsRequest.java |  35 
 ...ubClusterPoliciesConfigurationsResponse.java |  66 +++
 ...GetSubClusterPolicyConfigurationRequest.java |  62 ++
 ...etSubClusterPolicyConfigurationResponse.java |  65 +++
 ...SetSubClusterPolicyConfigurationRequest.java |  79 
 ...etSubClusterPolicyConfigurationResponse.java |  36 
 .../records/SubClusterPolicyConfiguration.java  | 130 +
 ...sterPoliciesConfigurationsRequestPBImpl.java |  95 +
 ...terPoliciesConfigurationsResponsePBImpl.java | 191 +++
 ...ClusterPolicyConfigurationRequestPBImpl.java | 103 ++
 ...lusterPolicyConfigurationResponsePBImpl.java | 143 ++
 .../pb/GetSubClustersInfoResponsePBImpl.java|   4 +-
 ...ClusterPolicyConfigurationRequestPBImpl.java | 159 +++
 ...lusterPolicyConfigurationResponsePBImpl.java |  93 +
 .../pb/SubClusterPolicyConfigurationPBImpl.java | 121 
 .../proto/yarn_server_federation_protos.proto   |  28 +++
 .../records/TestFederationProtocolRecords.java  |  53 -
 18 files changed, 1536 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edbcd9bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
new file mode 100644
index 000..9d9bd9b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/FederationPolicyStore.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPoliciesConfigurationsResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.GetSubClusterPolicyConfigurationResponse;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationRequest;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SetSubClusterPolicyConfigurationResponse;
+
+/**
+ * The FederationPolicyStore provides a key-value interface to access the
+ * policies configured for the system. The key is a "queue" name, i.e., the
+ * system allows to configure a different policy for each queue in the system
+ * (though each policy can make dynamic run-time decisions on a 
per-job/per-task
+ * basis). The value is a {@code SubClusterPolicyConfiguration}, a serialized
+ * representation of the policy type and its 

[16/50] [abbrv] hadoop git commit: YARN-5912. Fix breadcrumb issues for various pages in new YARN UI. Contributed by Akhil P B.

2017-02-14 Thread subru
YARN-5912. Fix breadcrumb issues for various pages in new YARN UI. Contributed 
by Akhil P B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fa084c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fa084c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fa084c4

Branch: refs/heads/YARN-2915
Commit: 1fa084c4254b89cd45210727ccb68725d583ff62
Parents: b9f8491
Author: Sunil G 
Authored: Tue Feb 14 22:29:21 2017 +0530
Committer: Sunil G 
Committed: Tue Feb 14 22:29:21 2017 +0530

--
 .../webapp/app/controllers/yarn-app-attempt.js  |  2 +-
 .../webapp/app/controllers/yarn-app-attempts.js |  2 +-
 .../src/main/webapp/app/controllers/yarn-app.js |  2 +-
 .../main/webapp/app/controllers/yarn-apps.js|  2 +-
 .../app/controllers/yarn-container-log.js   |  7 +++-
 .../webapp/app/controllers/yarn-node-app.js |  7 +++-
 .../webapp/app/controllers/yarn-node-apps.js|  2 +-
 .../app/controllers/yarn-node-container.js  | 39 
 .../app/controllers/yarn-node-containers.js |  2 +-
 .../main/webapp/app/controllers/yarn-node.js|  2 +-
 .../webapp/app/controllers/yarn-services.js |  2 +-
 .../src/main/webapp/app/models/yarn-app.js  |  2 +-
 .../src/main/webapp/app/routes/yarn-node-app.js |  2 +-
 .../webapp/app/routes/yarn-node-container.js|  2 +-
 .../controllers/yarn-node-container-test.js | 30 +++
 15 files changed, 90 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index a458842..4c02361 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
index 9ebc2a6..92de2f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index 309c895..9c1cb5d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps.js
index 396f83b..18bf682 100644
--- 

[10/50] [abbrv] hadoop git commit: HDFS-11026. Convert BlockTokenIdentifier to use Protobuf. Contributed by Ewan Higgs.

2017-02-14 Thread subru
HDFS-11026. Convert BlockTokenIdentifier to use Protobuf. Contributed by Ewan 
Higgs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ed33e9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ed33e9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ed33e9c

Branch: refs/heads/YARN-2915
Commit: 4ed33e9ca3d85568e3904753a3ef61a85f801838
Parents: 646c6d6
Author: Chris Douglas 
Authored: Mon Feb 13 11:27:48 2017 -0800
Committer: Chris Douglas 
Committed: Mon Feb 13 11:29:18 2017 -0800

--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  51 
 .../token/block/BlockTokenIdentifier.java   |  89 +-
 .../src/main/proto/hdfs.proto   |  33 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +
 .../token/block/BlockTokenSecretManager.java|  18 +-
 .../hadoop/hdfs/server/balancer/KeyManager.java |   6 +-
 .../server/blockmanagement/BlockManager.java|   9 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   5 +-
 .../src/main/resources/hdfs-default.xml |   9 +
 .../security/token/block/TestBlockToken.java| 297 +--
 10 files changed, 480 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ed33e9c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 0180828..ad80bc2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -121,9 +121,11 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmI
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTokenSecretProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto;
@@ -584,6 +586,55 @@ public class PBHelperClient {
 return blockTokens;
   }
 
+  public static AccessModeProto convert(BlockTokenIdentifier.AccessMode aMode) 
{
+switch (aMode) {
+case READ: return AccessModeProto.READ;
+case WRITE: return AccessModeProto.WRITE;
+case COPY: return AccessModeProto.COPY;
+case REPLACE: return AccessModeProto.REPLACE;
+default:
+  throw new IllegalArgumentException("Unexpected AccessMode: " + aMode);
+}
+  }
+
+  public static BlockTokenIdentifier.AccessMode convert(
+  AccessModeProto accessModeProto) {
+switch (accessModeProto) {
+case READ: return BlockTokenIdentifier.AccessMode.READ;
+case WRITE: return BlockTokenIdentifier.AccessMode.WRITE;
+case COPY: return BlockTokenIdentifier.AccessMode.COPY;
+case REPLACE: return BlockTokenIdentifier.AccessMode.REPLACE;
+default:
+  throw new IllegalArgumentException("Unexpected AccessModeProto: " +
+  accessModeProto);
+}
+  }
+
+  public static BlockTokenSecretProto convert(
+  BlockTokenIdentifier blockTokenSecret) {
+BlockTokenSecretProto.Builder builder =
+BlockTokenSecretProto.newBuilder();
+builder.setExpiryDate(blockTokenSecret.getExpiryDate());
+builder.setKeyId(blockTokenSecret.getKeyId());
+String userId = blockTokenSecret.getUserId();
+if (userId != null) {
+  builder.setUserId(userId);
+}
+
+String blockPoolId = blockTokenSecret.getBlockPoolId();
+if (blockPoolId != null) {
+  builder.setBlockPoolId(blockPoolId);
+}
+
+builder.setBlockId(blockTokenSecret.getBlockId());
+
+for (BlockTokenIdentifier.AccessMode aMode :
+blockTokenSecret.getAccessModes()) {
+  builder.addModes(convert(aMode));
+}
+return builder.build();
+  }
+
   static public DatanodeInfo convert(DatanodeInfoProto di) {
 if (di == 

[12/50] [abbrv] hadoop git commit: HDFS-11407. Document the missing usages of OfflineImageViewer processors. Contributed by Yiqun Lin.

2017-02-14 Thread subru
HDFS-11407. Document the missing usages of OfflineImageViewer processors. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/719df99c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/719df99c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/719df99c

Branch: refs/heads/YARN-2915
Commit: 719df99c05a4e0bc62a7cad6d22c8dbe5d92bde0
Parents: 71c23c9
Author: Yiqun Lin 
Authored: Tue Feb 14 18:48:08 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 14 18:48:08 2017 +0800

--
 .../src/site/markdown/HdfsImageViewer.md| 63 
 1 file changed, 63 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/719df99c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
index b677f6a..10d98b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
@@ -133,6 +133,69 @@ Applying the Offline Image Viewer with XML processor would 
result in the followi
  
...remaining output omitted...
 
+### ReverseXML Processor
+
+ReverseXML processor is the opposite of the XML processor. Users can specify an 
input XML file and an output fsimage file via the -i and -o command-line options.
+
+   bash$ bin/hdfs oiv -p ReverseXML -i fsimage.xml -o fsimage
+
+This will reconstruct an fsimage from an XML file.
+
+### FileDistribution Processor
+
+FileDistribution processor can analyze file sizes in the namespace image. 
Users can specify maxSize (128GB by default) and step (2MB by default) in bytes 
via the -maxSize and -step command-line options.
+
+   bash$ bin/hdfs oiv -p FileDistribution -maxSize maxSize -step size -i 
fsimage -o output
+
+The processor will calculate how many files in the system fall into each 
segment. The output file is formatted as a tab separated two column table 
showed as the following output:
+
+   SizeNumFiles
+   4   1
+   12  1
+   16  1
+   20  1
+   totalFiles = 4
+   totalDirectories = 2
+   totalBlocks = 4
+   totalSpace = 48
+   maxFileSize = 21
+
+To make the output result look more readable, users can specify -format option 
in addition.
+
+   bash$ bin/hdfs oiv -p FileDistribution -maxSize maxSize -step size 
-format -i fsimage -o output
+
+This would result in the following output:
+
+   Size Range  NumFiles
+   (0 B, 4 B]  1
+   (8 B, 12 B] 1
+   (12 B, 16 B]1
+   (16 B, 21 B]1
+   totalFiles = 4
+   totalDirectories = 2
+   totalBlocks = 4
+   totalSpace = 48
+   maxFileSize = 21
+
+### Delimited Processor
+
+Delimited processor generates a text representation of the fsimage, with each 
element separated by a delimiter string (\t by default). Users can specify a 
new delimiter string with the -delimiter option.
+
+   bash$ bin/hdfs oiv -p Delimited -delimiter delimiterString -i fsimage 
-o output
+
+In addition, users can specify a temporary dir to cache intermediate result by 
the following command:
+
+   bash$ bin/hdfs oiv -p Delimited -delimiter delimiterString -t 
temporaryDir -i fsimage -o output
+
+If not set, Delimited processor will construct the namespace in memory before 
outputting text. The output result of this processor should be like the 
following output:
+
+   PathReplication ModificationTimeAccessTime  
PreferredBlockSize  BlocksCount FileSizeNSQUOTA DSQUOTA 
Permission  UserNameGroupName
+   /   0   2017-02-13 10:391970-01-01 08:000   
0   0   9223372036854775807 -1  drwxr-xr-x  root
supergroup
+   /dir0   0   2017-02-13 10:391970-01-01 08:000   
0   0   -1  -1  drwxr-xr-x  rootsupergroup
+   /dir0/file0 1   2017-02-13 10:392017-02-13 10:39
134217728   1   1   0   0   -rw-r--r--  root
supergroup
+   /dir0/file1 1   2017-02-13 10:392017-02-13 10:39
134217728   1   1   0   0   -rw-r--r--  root
supergroup
+   /dir0/file2 1   2017-02-13 10:392017-02-13 10:39
134217728   1   1   0   0   -rw-r--r--  root
supergroup
+
 Options
 ---
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional 

[15/50] [abbrv] hadoop git commit: HADOOP-14058. Fix NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. Contributed by Yiqun Lin.

2017-02-14 Thread subru
HADOOP-14058. Fix 
NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f84912
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f84912
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f84912

Branch: refs/heads/YARN-2915
Commit: b9f8491252f5a23a91a1d695d748556a0fd803ae
Parents: aaf106f
Author: Akira Ajisaka 
Authored: Wed Feb 15 01:45:56 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 01:45:56 2017 +0900

--
 .../hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f84912/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
index ef223ac..261f79b 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -85,7 +85,7 @@ public abstract class NativeS3FileSystemContractBaseTest
 
   public void testDirWithDifferentMarkersWorks() throws Exception {
 
-for (int i = 0; i < 3; i++) {
+for (int i = 0; i <= 3; i++) {
   String base = "test/hadoop" + i;
   Path path = path("/" + base);
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[04/50] [abbrv] hadoop git commit: HADOOP-13075. Add support for SSE-KMS and SSE-C in s3a filesystem. (Steve Moist via lei)

2017-02-14 Thread subru
HADOOP-13075. Add support for SSE-KMS and SSE-C in s3a filesystem. (Steve Moist 
via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/839b690e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/839b690e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/839b690e

Branch: refs/heads/YARN-2915
Commit: 839b690ed5edc2ac4984640d58c005bb63cd8a07
Parents: 649deb7
Author: Lei Xu 
Authored: Sat Feb 11 13:59:03 2017 -0800
Committer: Lei Xu 
Committed: Sat Feb 11 13:59:03 2017 -0800

--
 hadoop-tools/hadoop-aws/pom.xml |   2 +
 .../org/apache/hadoop/fs/s3a/Constants.java |  22 ++-
 .../hadoop/fs/s3a/S3ABlockOutputStream.java |   1 +
 .../hadoop/fs/s3a/S3AEncryptionMethods.java |  61 
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 131 ++--
 .../apache/hadoop/fs/s3a/S3AInputStream.java|  24 ++-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  10 ++
 .../hadoop/fs/s3a/S3ObjectAttributes.java   |  59 +++
 .../src/site/markdown/tools/hadoop-aws/index.md |  29 +++-
 .../fs/s3a/AbstractTestS3AEncryption.java   | 132 
 .../hadoop/fs/s3a/ITestS3AEncryption.java   |  97 
 .../ITestS3AEncryptionAlgorithmPropagation.java |  76 --
 .../ITestS3AEncryptionAlgorithmValidation.java  | 152 +++
 .../ITestS3AEncryptionBlockOutputStream.java|  36 -
 .../hadoop/fs/s3a/ITestS3AEncryptionSSEC.java   |  90 +++
 ...ITestS3AEncryptionSSECBlockOutputStream.java |  46 ++
 .../s3a/ITestS3AEncryptionSSEKMSDefaultKey.java |  57 +++
 .../ITestS3AEncryptionSSEKMSUserDefinedKey.java |  48 ++
 ...onSSEKMSUserDefinedKeyBlockOutputStream.java |  52 +++
 .../hadoop/fs/s3a/ITestS3AEncryptionSSES3.java  |  43 ++
 ...TestS3AEncryptionSSES3BlockOutputStream.java |  45 ++
 .../hadoop/fs/s3a/TestS3AGetFileStatus.java |  57 +--
 22 files changed, 1030 insertions(+), 240 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/839b690e/hadoop-tools/hadoop-aws/pom.xml
--
diff --git a/hadoop-tools/hadoop-aws/pom.xml b/hadoop-tools/hadoop-aws/pom.xml
index 6fd503c..1f64b02 100644
--- a/hadoop-tools/hadoop-aws/pom.xml
+++ b/hadoop-tools/hadoop-aws/pom.xml
@@ -183,6 +183,7 @@
 
**/ITestJets3tNativeS3FileSystemContract.java
 **/ITest*Root*.java
 **/ITestS3AFileContextStatistics.java
+**/ITestS3AEncryptionSSE*.java
 **/ITestS3AHuge*.java
   
 
@@ -211,6 +212,7 @@
 **/ITest*Root*.java
 **/ITestS3AFileContextStatistics.java
 **/ITestS3AHuge*.java
+**/ITestS3AEncryptionSSE*.java
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/839b690e/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index 45d974c..414f951 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -216,17 +216,28 @@ public final class Constants {
   "fs.s3a.multipart.purge.age";
   public static final long DEFAULT_PURGE_EXISTING_MULTIPART_AGE = 86400;
 
-  // s3 server-side encryption
+  // s3 server-side encryption, see S3AEncryptionMethods for valid options
   public static final String SERVER_SIDE_ENCRYPTION_ALGORITHM =
   "fs.s3a.server-side-encryption-algorithm";
 
   /**
* The standard encryption algorithm AWS supports.
* Different implementations may support others (or none).
+   * Use the S3AEncryptionMethods instead when configuring
+   * which Server Side Encryption to use.
*/
+  @Deprecated
   public static final String SERVER_SIDE_ENCRYPTION_AES256 =
   "AES256";
 
+  /**
+   *  Used to specify which AWS KMS key to use if
+   *  SERVER_SIDE_ENCRYPTION_ALGORITHM is AWS_KMS (will default to aws/s3
+   *  master key if left blank) or with SSE_C, the actual AES 256 key.
+   */
+  public static final String SERVER_SIDE_ENCRYPTION_KEY =
+  "fs.s3a.server-side-encryption-key";
+
   //override signature algorithm used for signing requests
   public static final String SIGNING_ALGORITHM = "fs.s3a.signing-algorithm";
 
@@ -296,4 +307,13 @@ public final class Constants {
*/
   @InterfaceAudience.Private
   public 

[09/50] [abbrv] hadoop git commit: YARN-3933. FairScheduler: Multiple calls to completedContainer are not safe. (Shiwei Guo and Miklos Szegedi via kasha)

2017-02-14 Thread subru
YARN-3933. FairScheduler: Multiple calls to completedContainer are not safe. 
(Shiwei Guo and Miklos Szegedi via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/646c6d65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/646c6d65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/646c6d65

Branch: refs/heads/YARN-2915
Commit: 646c6d6509f515b1373288869fb92807fa2ddc9b
Parents: cc45da7
Author: Karthik Kambatla 
Authored: Mon Feb 13 11:26:30 2017 -0800
Committer: Karthik Kambatla 
Committed: Mon Feb 13 11:26:30 2017 -0800

--
 .../scheduler/fair/FSAppAttempt.java|  9 +++-
 .../scheduler/fair/TestFairScheduler.java   | 55 +++-
 2 files changed, 61 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/646c6d65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 9e57fa7..6dfcc84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -140,6 +140,13 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   Container container = rmContainer.getContainer();
   ContainerId containerId = container.getId();
 
+  // Remove from the list of containers
+  if (liveContainers.remove(containerId) == null) {
+LOG.info("Additional complete request on completed container " +
+rmContainer.getContainerId());
+return;
+  }
+
   // Remove from the list of newly allocated containers if found
   newlyAllocatedContainers.remove(rmContainer);
 
@@ -151,8 +158,6 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 + " in state: " + rmContainer.getState() + " event:" + event);
   }
 
-  // Remove from the list of containers
-  liveContainers.remove(rmContainer.getContainerId());
   untrackContainerForPreemption(rmContainer);
 
   Resource containerResource = rmContainer.getContainer().getResource();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/646c6d65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index ce32459..da5d3ad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -90,6 +90,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
@@ -99,6 +100,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerUpdates;
 import 

[07/50] [abbrv] hadoop git commit: YARN-6113. Re-direct NM Web Service to get container logs for finished applications. Contributed by Xuan Gong.

2017-02-14 Thread subru
YARN-6113. Re-direct NM Web Service to get container logs for finished 
applications. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/464ff479
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/464ff479
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/464ff479

Branch: refs/heads/YARN-2915
Commit: 464ff479ceec76609dca3539081de6b503b17325
Parents: 243c0f3
Author: Junping Du 
Authored: Mon Feb 13 06:12:54 2017 -0800
Committer: Junping Du 
Committed: Mon Feb 13 06:12:54 2017 -0800

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  5 +-
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 31 ++-
 .../src/main/resources/yarn-default.xml |  8 ++
 .../nodemanager/webapp/NMWebServices.java   | 45 +-
 .../nodemanager/webapp/TestNMWebServices.java   | 93 ++--
 5 files changed, 167 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/464ff479/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7887fbc..136227a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1077,7 +1077,10 @@ public class YarnConfiguration extends Configuration {
 
   public static final String YARN_LOG_SERVER_URL =
 YARN_PREFIX + "log.server.url";
-  
+
+  public static final String YARN_LOG_SERVER_WEBSERVICE_URL =
+  YARN_PREFIX + "log.server.web-service.url";
+
   public static final String YARN_TRACKING_URL_GENERATOR = 
   YARN_PREFIX + "tracking.url.generator";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/464ff479/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 89f0551..e412173 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -26,6 +26,7 @@ import java.net.UnknownHostException;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -428,7 +429,8 @@ public class WebAppUtils {
 return Arrays.asList("text", "octet-stream");
   }
 
-  private static String getURLEncodedQueryString(HttpServletRequest request) {
+  private static String getURLEncodedQueryString(HttpServletRequest request,
+  String parameterToRemove) {
 String queryString = request.getQueryString();
 if (queryString != null && !queryString.isEmpty()) {
   String reqEncoding = request.getCharacterEncoding();
@@ -436,20 +438,41 @@ public class WebAppUtils {
 reqEncoding = "ISO-8859-1";
   }
   Charset encoding = Charset.forName(reqEncoding);
-  List params = URLEncodedUtils.parse(queryString, 
encoding);
+  List params = URLEncodedUtils.parse(queryString,
+  encoding);
+  if (parameterToRemove != null && !parameterToRemove.isEmpty()) {
+Iterator paramIterator = params.iterator();
+while(paramIterator.hasNext()) {
+  NameValuePair current = paramIterator.next();
+  if (current.getName().equals(parameterToRemove)) {
+paramIterator.remove();
+  }
+}
+  }
   return URLEncodedUtils.format(params, encoding);
 }
 return null;
   }
 
   /**
+* Get a query string which removes the passed parameter.
+* @param httpRequest HttpServletRequest with the request details
+* @param parameterName the query parameters must be removed
+* @return the query parameter string
+*/
+  public static String removeQueryParams(HttpServletRequest httpRequest,
+  String parameterName) {
+return getURLEncodedQueryString(httpRequest, parameterName);
+ 

hadoop git commit: HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre Villard via Yongjun Zhang)

2017-02-14 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8f9aae019 -> 5bb135092


HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre 
Villard via Yongjun Zhang)

(cherry picked from commit 8e53f2b9b08560bf4f8e81e697063277dbdc68f9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5bb13509
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5bb13509
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5bb13509

Branch: refs/heads/branch-2.8
Commit: 5bb1350922755317f277d2f3d59d1c2fc940ccb3
Parents: 8f9aae0
Author: Yongjun Zhang 
Authored: Tue Feb 14 12:47:06 2017 -0800
Committer: Yongjun Zhang 
Committed: Tue Feb 14 14:14:58 2017 -0800

--
 .../hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java   | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5bb13509/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index b0421f2..0f2a279 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -71,6 +72,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -103,6 +105,10 @@ public class WebHdfsHandler extends 
SimpleChannelInboundHandler {
 throws IOException {
 this.conf = conf;
 this.confForCreate = confForCreate;
+/** set user pattern based on configuration file */
+UserParam.setUserPattern(
+conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre Villard via Yongjun Zhang)

2017-02-14 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 37921b3fe -> eb5500f0e


HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre 
Villard via Yongjun Zhang)

(cherry picked from commit 8e53f2b9b08560bf4f8e81e697063277dbdc68f9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb5500f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb5500f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb5500f0

Branch: refs/heads/branch-2
Commit: eb5500f0e7199efaa9a9805aeff5432ad4faf0ec
Parents: 37921b3
Author: Yongjun Zhang 
Authored: Tue Feb 14 12:47:06 2017 -0800
Committer: Yongjun Zhang 
Committed: Tue Feb 14 14:11:45 2017 -0800

--
 .../hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java   | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb5500f0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index 0b759f8..61170c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -54,6 +55,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -107,6 +109,10 @@ public class WebHdfsHandler extends 
SimpleChannelInboundHandler {
 throws IOException {
 this.conf = conf;
 this.confForCreate = confForCreate;
+/** set user pattern based on configuration file */
+UserParam.setUserPattern(
+conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6061. Add an UncaughtExceptionHandler for critical threads in RM. (Yufei Gu via kasha)

2017-02-14 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d72f1c597 -> 37921b3fe


YARN-6061. Add an UncaughtExceptionHandler for critical threads in RM. (Yufei 
Gu via kasha)

(cherry picked from commit 652679aa8ad6f9e61b8ed8e2b04b3e0332025e94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37921b3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37921b3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37921b3f

Branch: refs/heads/branch-2
Commit: 37921b3fef26e74a1daccaa278e895bc01d45a1e
Parents: d72f1c5
Author: Karthik Kambatla 
Authored: Tue Feb 14 13:39:34 2017 -0800
Committer: Karthik Kambatla 
Committed: Tue Feb 14 13:40:06 2017 -0800

--
 .../hadoop/yarn/client/TestRMFailover.java  | 100 ++-
 .../yarn/server/resourcemanager/RMContext.java  |   2 +
 .../server/resourcemanager/RMContextImpl.java   |  10 ++
 ...MCriticalThreadUncaughtExceptionHandler.java |  58 +++
 .../resourcemanager/RMFatalEventType.java   |   5 +-
 .../server/resourcemanager/ResourceManager.java |  65 +---
 .../resourcemanager/recovery/RMStateStore.java  |  13 +--
 .../DominantResourceFairnessPolicy.java |   2 +-
 8 files changed, 226 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37921b3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index b58a775..4bf6a78 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -22,7 +22,10 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
@@ -37,14 +40,18 @@ import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
 import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.RMCriticalThreadUncaughtExceptionHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.junit.After;
@@ -174,7 +181,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 // so it transitions to standby.
 ResourceManager rm = cluster.getResourceManager(
 cluster.getActiveRMIndex());
-rm.handleTransitionToStandBy();
+rm.handleTransitionToStandByInNewThread();
 int maxWaitingAttempts = 2000;
 while (maxWaitingAttempts-- > 0 ) {
   if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
@@ -349,4 +356,95 @@ public class TestRMFailover extends ClientBaseWithFixes {
 }
 return redirectUrl;
   }
+
+  /**
+   * Throw {@link RuntimeException} inside a thread of
+   * {@link ResourceManager} with HA enabled and check if the
+   * {@link ResourceManager} is transited to standby state.
+   *
+   * @throws InterruptedException if any
+   */
+  @Test
+  public void testUncaughtExceptionHandlerWithHAEnabled()
+  throws InterruptedException {
+conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
+cluster.init(conf);
+cluster.start();
+assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
+
+ResourceManager resourceManager = 

hadoop git commit: HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre Villard via Yongjun Zhang)

2017-02-14 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 652679aa8 -> 8e53f2b9b


HDFS-11391. Numeric usernames do not work with WebHDFS FS write access. (Pierre 
Villard via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e53f2b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e53f2b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e53f2b9

Branch: refs/heads/trunk
Commit: 8e53f2b9b08560bf4f8e81e697063277dbdc68f9
Parents: 652679a
Author: Yongjun Zhang 
Authored: Tue Feb 14 12:47:06 2017 -0800
Committer: Yongjun Zhang 
Committed: Tue Feb 14 13:40:53 2017 -0800

--
 .../hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java   | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e53f2b9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index 095f41d..f8c15fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -108,6 +110,10 @@ public class WebHdfsHandler extends 
SimpleChannelInboundHandler {
 throws IOException {
 this.conf = conf;
 this.confForCreate = confForCreate;
+/** set user pattern based on configuration file */
+UserParam.setUserPattern(
+conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6061. Add an UncaughtExceptionHandler for critical threads in RM. (Yufei Gu via kasha)

2017-02-14 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk aaf271323 -> 652679aa8


YARN-6061. Add an UncaughtExceptionHandler for critical threads in RM. (Yufei 
Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/652679aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/652679aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/652679aa

Branch: refs/heads/trunk
Commit: 652679aa8ad6f9e61b8ed8e2b04b3e0332025e94
Parents: aaf2713
Author: Karthik Kambatla 
Authored: Tue Feb 14 13:39:34 2017 -0800
Committer: Karthik Kambatla 
Committed: Tue Feb 14 13:39:41 2017 -0800

--
 .../hadoop/yarn/client/TestRMFailover.java  | 100 ++-
 .../yarn/server/resourcemanager/RMContext.java  |   2 +
 .../server/resourcemanager/RMContextImpl.java   |  10 ++
 ...MCriticalThreadUncaughtExceptionHandler.java |  58 +++
 .../resourcemanager/RMFatalEventType.java   |   5 +-
 .../server/resourcemanager/ResourceManager.java |  65 +---
 .../resourcemanager/recovery/RMStateStore.java  |  13 +--
 .../DominantResourceFairnessPolicy.java |   2 +-
 8 files changed, 226 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/652679aa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
index b58a775..4bf6a78 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
@@ -22,7 +22,10 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
 import java.net.HttpURLConnection;
@@ -37,14 +40,18 @@ import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
 import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.RMCriticalThreadUncaughtExceptionHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 import org.junit.After;
@@ -174,7 +181,7 @@ public class TestRMFailover extends ClientBaseWithFixes {
 // so it transitions to standby.
 ResourceManager rm = cluster.getResourceManager(
 cluster.getActiveRMIndex());
-rm.handleTransitionToStandBy();
+rm.handleTransitionToStandByInNewThread();
 int maxWaitingAttempts = 2000;
 while (maxWaitingAttempts-- > 0 ) {
   if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
@@ -349,4 +356,95 @@ public class TestRMFailover extends ClientBaseWithFixes {
 }
 return redirectUrl;
   }
+
+  /**
+   * Throw {@link RuntimeException} inside a thread of
+   * {@link ResourceManager} with HA enabled and check if the
+   * {@link ResourceManager} is transited to standby state.
+   *
+   * @throws InterruptedException if any
+   */
+  @Test
+  public void testUncaughtExceptionHandlerWithHAEnabled()
+  throws InterruptedException {
+conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster");
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort);
+cluster.init(conf);
+cluster.start();
+assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex());
+
+ResourceManager resourceManager = cluster.getResourceManager(
+cluster.getActiveRMIndex());
+
+final 

hadoop git commit: HDFS-11409. DatanodeInfo getNetworkLocation and setNetworkLocation should use volatile instead of synchronized. Contributed by Chen Liang.

2017-02-14 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 92dbdbc49 -> d72f1c597


HDFS-11409. DatanodeInfo getNetworkLocation and setNetworkLocation should use 
volatile instead of synchronized. Contributed by Chen Liang.

(cherry picked from commit aaf27132350547fcde1fdb372f19626838f44bc4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d72f1c59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d72f1c59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d72f1c59

Branch: refs/heads/branch-2
Commit: d72f1c597616d703094ccb9d6600ae31cedba98b
Parents: 92dbdbc
Author: Xiaoyu Yao 
Authored: Tue Feb 14 12:52:34 2017 -0800
Committer: Xiaoyu Yao 
Committed: Tue Feb 14 12:55:49 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java   | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72f1c59/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index c6a69ab..acbcffa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -51,7 +51,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private long lastUpdate;
   private long lastUpdateMonotonic;
   private int xceiverCount;
-  private String location = NetworkTopology.DEFAULT_RACK;
+  private volatile String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
   private List dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
@@ -292,10 +292,12 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
   }
 
   /** network location */
-  public synchronized String getNetworkLocation() {return location;}
+  @Override
+  public String getNetworkLocation() {return location;}
 
   /** Sets the network location */
-  public synchronized void setNetworkLocation(String location) {
+  @Override
+  public void setNetworkLocation(String location) {
 this.location = NodeBase.normalize(location);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11409. DatanodeInfo getNetworkLocation and setNetworkLocation should use volatile instead of synchronized. Contributed by Chen Liang.

2017-02-14 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0cf599371 -> aaf271323


HDFS-11409. DatanodeInfo getNetworkLocation and setNetworkLocation should use 
volatile instead of synchronized. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aaf27132
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aaf27132
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aaf27132

Branch: refs/heads/trunk
Commit: aaf27132350547fcde1fdb372f19626838f44bc4
Parents: 0cf5993
Author: Xiaoyu Yao 
Authored: Tue Feb 14 12:52:34 2017 -0800
Committer: Xiaoyu Yao 
Committed: Tue Feb 14 12:52:34 2017 -0800

--
 .../java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf27132/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 41735b1..acbcffa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -51,7 +51,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private long lastUpdate;
   private long lastUpdateMonotonic;
   private int xceiverCount;
-  private String location = NetworkTopology.DEFAULT_RACK;
+  private volatile String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
   private List dependentHostNames = new LinkedList<>();
   private String upgradeDomain;
@@ -293,11 +293,11 @@ public class DatanodeInfo extends DatanodeID implements 
Node {
 
   /** network location */
   @Override
-  public synchronized String getNetworkLocation() {return location;}
+  public String getNetworkLocation() {return location;}
 
   /** Sets the network location */
   @Override
-  public synchronized void setNetworkLocation(String location) {
+  public void setNetworkLocation(String location) {
 this.location = NodeBase.normalize(location);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6173. Add artifact info and privileged container details to the container info in API GET response. Contributed by Gour Saha

2017-02-14 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services 732c5714e -> 752b54854


YARN-6173. Add artifact info and privileged container details to the container 
info in API GET response. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/752b5485
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/752b5485
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/752b5485

Branch: refs/heads/yarn-native-services
Commit: 752b54854ed50c7c0902dc4fa07f2e8e3b8bcf79
Parents: 732c571
Author: Billie Rinaldi 
Authored: Tue Feb 14 09:52:41 2017 -0800
Committer: Billie Rinaldi 
Committed: Tue Feb 14 09:52:41 2017 -0800

--
 .../api/impl/ApplicationApiService.java | 18 +++
 .../yarn/services/resource/Container.java   | 53 
 ...RN-Simplified-V1-API-Layer-For-Services.yaml |  6 +++
 3 files changed, 67 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/752b5485/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index b11da2c..7028caa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -1000,6 +1000,19 @@ public class ApplicationApiService implements 
ApplicationApi {
   resource.setCpus(jsonGetAsInt(componentRole, "yarn.vcores"));
   resource.setMemory(jsonGetAsString(componentRole, 
"yarn.memory"));
   container.setResource(resource);
+  Artifact artifact = new Artifact();
+  String dockerImageName = jsonGetAsString(componentRole,
+  "docker.image");
+  if (StringUtils.isNotEmpty(dockerImageName)) {
+artifact.setId(dockerImageName);
+artifact.setType(Artifact.TypeEnum.DOCKER);
+  } else {
+// Might have to handle tarballs here
+artifact.setType(null);
+  }
+  container.setArtifact(artifact);
+  container.setPrivilegedContainer(
+  jsonGetAsBoolean(componentRole, "docker.usePrivileged"));
   // TODO: add container property - for response only?
   app.addContainer(container);
 }
@@ -1057,6 +1070,11 @@ public class ApplicationApiService implements 
ApplicationApi {
 : object.get(key).isJsonNull() ? null : object.get(key).getAsInt();
   }
 
+  private Boolean jsonGetAsBoolean(JsonObject object, String key) {
+return object.get(key) == null ? null
+: object.get(key).isJsonNull() ? null : object.get(key).getAsBoolean();
+  }
+
   private JsonObject jsonGetAsObject(JsonObject object, String key) {
 return object.get(key) == null ? null : object.get(key).getAsJsonObject();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/752b5485/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java
index f11c7b3..24aada7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Container.java
@@ -48,6 +48,8 @@ public class Container extends BaseResource {
   private ContainerState state = null;
   private String componentName = null;
   private Resource resource = null;
+  private Artifact artifact = null;
+  private Boolean privilegedContainer = null;
 
   /**

hadoop git commit: HDFS-11390. Add process name to httpfs process. Contributed by Weiwei Yang.

2017-02-14 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 eca70e4bf -> 8f9aae019


HDFS-11390. Add process name to httpfs process. Contributed by Weiwei Yang.

(cherry picked from commit 92dbdbc490a527054388dc2fc6751aa8268856ef)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f9aae01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f9aae01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f9aae01

Branch: refs/heads/branch-2.8
Commit: 8f9aae0193bc3696cb2047ff5f8d019579ad345f
Parents: eca70e4
Author: Xiao Chen 
Authored: Tue Feb 14 09:46:55 2017 -0800
Committer: Xiao Chen 
Committed: Tue Feb 14 09:47:41 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f9aae01/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
index a593b67..203024e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
@@ -36,7 +36,8 @@ source 
${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}/httpfs-config.sh
 #
 print "Using   CATALINA_OPTS:   ${CATALINA_OPTS}"
 
-catalina_opts="-Dhttpfs.home.dir=${HTTPFS_HOME}";
+catalina_opts="-Dproc_httpfs";
+catalina_opts="${catalina_opts} -Dhttpfs.home.dir=${HTTPFS_HOME}";
 catalina_opts="${catalina_opts} -Dhttpfs.config.dir=${HTTPFS_CONFIG}";
 catalina_opts="${catalina_opts} -Dhttpfs.log.dir=${HTTPFS_LOG}";
 catalina_opts="${catalina_opts} -Dhttpfs.temp.dir=${HTTPFS_TEMP}";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11390. Add process name to httpfs process. Contributed by Weiwei Yang.

2017-02-14 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8e291e101 -> 92dbdbc49


HDFS-11390. Add process name to httpfs process. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92dbdbc4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92dbdbc4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92dbdbc4

Branch: refs/heads/branch-2
Commit: 92dbdbc490a527054388dc2fc6751aa8268856ef
Parents: 8e291e1
Author: Xiao Chen 
Authored: Tue Feb 14 09:46:55 2017 -0800
Committer: Xiao Chen 
Committed: Tue Feb 14 09:46:55 2017 -0800

--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92dbdbc4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
index a593b67..203024e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
@@ -36,7 +36,8 @@ source 
${HADOOP_LIBEXEC_DIR:-${BASEDIR}/libexec}/httpfs-config.sh
 #
 print "Using   CATALINA_OPTS:   ${CATALINA_OPTS}"
 
-catalina_opts="-Dhttpfs.home.dir=${HTTPFS_HOME}";
+catalina_opts="-Dproc_httpfs";
+catalina_opts="${catalina_opts} -Dhttpfs.home.dir=${HTTPFS_HOME}";
 catalina_opts="${catalina_opts} -Dhttpfs.config.dir=${HTTPFS_CONFIG}";
 catalina_opts="${catalina_opts} -Dhttpfs.log.dir=${HTTPFS_LOG}";
 catalina_opts="${catalina_opts} -Dhttpfs.temp.dir=${HTTPFS_TEMP}";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML processor. Contributed by Wei-Chiu Chuang.

2017-02-14 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4718f6554 -> eca70e4bf


HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML 
processor. Contributed by Wei-Chiu Chuang.

(cherry picked from commit 89cadb42111e4ffbd3f4bde8250013bba23eb51e)
(cherry picked from commit 8e291e1015c4694f7b11cd97a6308d0d92a884cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eca70e4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eca70e4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eca70e4b

Branch: refs/heads/branch-2.8
Commit: eca70e4bff2a88189ef6742766a28c7aad1e0aa7
Parents: 4718f65
Author: Wei-Chiu Chuang 
Authored: Tue Feb 14 08:59:12 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Tue Feb 14 09:09:01 2017 -0800

--
 .../tools/offlineImageViewer/TestOfflineImageViewer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eca70e4b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index a7c30ec..00702c4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -69,6 +69,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -158,6 +160,15 @@ public class TestOfflineImageViewer {
   hdfs.mkdirs(invalidXMLDir);
   dirCount++;
 
+  //Create a directory with sticky bits
+  Path stickyBitDir = new Path("/stickyBit");
+  hdfs.mkdirs(stickyBitDir);
+  hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL,
+  FsAction.ALL, FsAction.ALL, true));
+  dirCount++;
+  writtenFiles.put(stickyBitDir.toString(),
+  hdfs.getFileStatus(stickyBitDir));
+
   // Get delegation tokens so we log the delegation token op
   Token[] delegationTokens = hdfs
   .addDelegationTokens(TEST_RENEWER, null);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML processor. Contributed by Wei-Chiu Chuang.

2017-02-14 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 25ac54a52 -> 8e291e101


HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML 
processor. Contributed by Wei-Chiu Chuang.

(cherry picked from commit 89cadb42111e4ffbd3f4bde8250013bba23eb51e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e291e10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e291e10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e291e10

Branch: refs/heads/branch-2
Commit: 8e291e1015c4694f7b11cd97a6308d0d92a884cb
Parents: 25ac54a
Author: Wei-Chiu Chuang 
Authored: Tue Feb 14 08:59:12 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Tue Feb 14 09:08:37 2017 -0800

--
 .../tools/offlineImageViewer/TestOfflineImageViewer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e291e10/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 740a8ab..dacbb85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -69,6 +69,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -158,6 +160,15 @@ public class TestOfflineImageViewer {
   hdfs.mkdirs(invalidXMLDir);
   dirCount++;
 
+  //Create a directory with sticky bits
+  Path stickyBitDir = new Path("/stickyBit");
+  hdfs.mkdirs(stickyBitDir);
+  hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL,
+  FsAction.ALL, FsAction.ALL, true));
+  dirCount++;
+  writtenFiles.put(stickyBitDir.toString(),
+  hdfs.getFileStatus(stickyBitDir));
+
   // Get delegation tokens so we log the delegation token op
   Token[] delegationTokens = hdfs
   .addDelegationTokens(TEST_RENEWER, null);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML processor. Contributed by Wei-Chiu Chuang.

2017-02-14 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1fa084c42 -> 0cf599371


HDFS-11084. Add a regression test for sticky bit support of OIV ReverseXML 
processor. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cf59937
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cf59937
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cf59937

Branch: refs/heads/trunk
Commit: 0cf5993712a01993bd701bd9664e6af284378b55
Parents: 1fa084c
Author: Wei-Chiu Chuang 
Authored: Tue Feb 14 08:59:12 2017 -0800
Committer: Wei-Chiu Chuang 
Committed: Tue Feb 14 09:11:55 2017 -0800

--
 .../tools/offlineImageViewer/TestOfflineImageViewer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf59937/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 740a8ab..dacbb85 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -69,6 +69,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -158,6 +160,15 @@ public class TestOfflineImageViewer {
   hdfs.mkdirs(invalidXMLDir);
   dirCount++;
 
+  //Create a directory with sticky bits
+  Path stickyBitDir = new Path("/stickyBit");
+  hdfs.mkdirs(stickyBitDir);
+  hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL,
+  FsAction.ALL, FsAction.ALL, true));
+  dirCount++;
+  writtenFiles.put(stickyBitDir.toString(),
+  hdfs.getFileStatus(stickyBitDir));
+
   // Get delegation tokens so we log the delegation token op
   Token[] delegationTokens = hdfs
   .addDelegationTokens(TEST_RENEWER, null);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5912. Fix breadcrumb issues for various pages in new YARN UI. Contributed by Akhil P B.

2017-02-14 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk b9f849125 -> 1fa084c42


YARN-5912. Fix breadcrumb issues for various pages in new YARN UI. Contributed 
by Akhil P B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fa084c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fa084c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fa084c4

Branch: refs/heads/trunk
Commit: 1fa084c4254b89cd45210727ccb68725d583ff62
Parents: b9f8491
Author: Sunil G 
Authored: Tue Feb 14 22:29:21 2017 +0530
Committer: Sunil G 
Committed: Tue Feb 14 22:29:21 2017 +0530

--
 .../webapp/app/controllers/yarn-app-attempt.js  |  2 +-
 .../webapp/app/controllers/yarn-app-attempts.js |  2 +-
 .../src/main/webapp/app/controllers/yarn-app.js |  2 +-
 .../main/webapp/app/controllers/yarn-apps.js|  2 +-
 .../app/controllers/yarn-container-log.js   |  7 +++-
 .../webapp/app/controllers/yarn-node-app.js |  7 +++-
 .../webapp/app/controllers/yarn-node-apps.js|  2 +-
 .../app/controllers/yarn-node-container.js  | 39 
 .../app/controllers/yarn-node-containers.js |  2 +-
 .../main/webapp/app/controllers/yarn-node.js|  2 +-
 .../webapp/app/controllers/yarn-services.js |  2 +-
 .../src/main/webapp/app/models/yarn-app.js  |  2 +-
 .../src/main/webapp/app/routes/yarn-node-app.js |  2 +-
 .../webapp/app/routes/yarn-node-container.js|  2 +-
 .../controllers/yarn-node-container-test.js | 30 +++
 15 files changed, 90 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index a458842..4c02361 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
index 9ebc2a6..92de2f9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempts.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
index 309c895..9c1cb5d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app.js
@@ -27,7 +27,7 @@ export default Ember.Controller.extend({
   routeName: 'application'
 },{
   text: "Applications",
-  routeName: 'yarn-apps'
+  routeName: 'yarn-apps.apps'
 }, {
   text: `App [${appId}]`,
   routeName: 'yarn-app',

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fa084c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-apps.js
 

hadoop git commit: HADOOP-14058. Fix NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. Contributed by Yiqun Lin.

2017-02-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 233252aa9 -> 4718f6554


HADOOP-14058. Fix 
NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. 
Contributed by Yiqun Lin.

(cherry picked from commit b9f8491252f5a23a91a1d695d748556a0fd803ae)
(cherry picked from commit 25ac54a52eff0d5b8f4bee83b6f4471277a3ac61)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4718f655
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4718f655
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4718f655

Branch: refs/heads/branch-2.8
Commit: 4718f65545ab28b501605134d384f3247ac60fad
Parents: 233252a
Author: Akira Ajisaka 
Authored: Wed Feb 15 01:45:56 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 01:47:21 2017 +0900

--
 .../hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4718f655/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
index 3b50515..d303a92 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -85,7 +85,7 @@ public abstract class NativeS3FileSystemContractBaseTest
 
   public void testDirWithDifferentMarkersWorks() throws Exception {
 
-for (int i = 0; i < 3; i++) {
+for (int i = 0; i <= 3; i++) {
   String base = "test/hadoop" + i;
   Path path = path("/" + base);
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14058. Fix NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. Contributed by Yiqun Lin.

2017-02-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 30ff5bff1 -> 25ac54a52


HADOOP-14058. Fix 
NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. 
Contributed by Yiqun Lin.

(cherry picked from commit b9f8491252f5a23a91a1d695d748556a0fd803ae)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25ac54a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25ac54a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25ac54a5

Branch: refs/heads/branch-2
Commit: 25ac54a52eff0d5b8f4bee83b6f4471277a3ac61
Parents: 30ff5bf
Author: Akira Ajisaka 
Authored: Wed Feb 15 01:45:56 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 01:46:59 2017 +0900

--
 .../hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25ac54a5/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
index 3b50515..d303a92 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -85,7 +85,7 @@ public abstract class NativeS3FileSystemContractBaseTest
 
   public void testDirWithDifferentMarkersWorks() throws Exception {
 
-for (int i = 0; i < 3; i++) {
+for (int i = 0; i <= 3; i++) {
   String base = "test/hadoop" + i;
   Path path = path("/" + base);
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-14058. Fix NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. Contributed by Yiqun Lin.

2017-02-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk aaf106fde -> b9f849125


HADOOP-14058. Fix 
NativeS3FileSystemContractBaseTest#testDirWithDifferentMarkersWorks. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f84912
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f84912
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f84912

Branch: refs/heads/trunk
Commit: b9f8491252f5a23a91a1d695d748556a0fd803ae
Parents: aaf106f
Author: Akira Ajisaka 
Authored: Wed Feb 15 01:45:56 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Feb 15 01:45:56 2017 +0900

--
 .../hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f84912/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
index ef223ac..261f79b 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/NativeS3FileSystemContractBaseTest.java
@@ -85,7 +85,7 @@ public abstract class NativeS3FileSystemContractBaseTest
 
   public void testDirWithDifferentMarkersWorks() throws Exception {
 
-for (int i = 0; i < 3; i++) {
+for (int i = 0; i <= 3; i++) {
   String base = "test/hadoop" + i;
   Path path = path("/" + base);
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-5966. AMRMClient changes to support ExecutionType update. (asuresh)

2017-02-14 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4164a2032 -> aaf106fde


YARN-5966. AMRMClient changes to support ExecutionType update. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aaf106fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aaf106fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aaf106fd

Branch: refs/heads/trunk
Commit: aaf106fde35ec97e2e2ea4d7a67434038c4273ac
Parents: 4164a20
Author: Arun Suresh 
Authored: Tue Feb 14 06:08:27 2017 -0800
Committer: Arun Suresh 
Committed: Tue Feb 14 06:09:10 2017 -0800

--
 .../yarn/api/records/UpdateContainerError.java  |  19 +-
 .../src/main/proto/yarn_service_protos.proto|   1 +
 .../hadoop/yarn/client/api/AMRMClient.java  |  33 +-
 .../yarn/client/api/async/AMRMClientAsync.java  |  33 +-
 .../api/async/impl/AMRMClientAsyncImpl.java |   7 +-
 .../yarn/client/api/impl/AMRMClientImpl.java| 111 +++--
 .../yarn/client/api/impl/TestAMRMClient.java|  60 ++-
 .../api/impl/TestAMRMClientOnRMRestart.java |   8 +-
 .../TestOpportunisticContainerAllocation.java   | 400 +--
 .../impl/pb/UpdateContainerErrorPBImpl.java |  16 +
 .../server/resourcemanager/RMServerUtils.java   |  14 +-
 ...pportunisticContainerAllocatorAMService.java |   5 +-
 .../capacity/TestIncreaseAllocationExpirer.java |   4 +-
 13 files changed, 587 insertions(+), 124 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
index e7458cf..4d184cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/UpdateContainerError.java
@@ -59,6 +59,22 @@ public abstract class UpdateContainerError {
   public abstract void setReason(String reason);
 
   /**
+   * Get current container version.
+   * @return Current container Version.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract int getCurrentContainerVersion();
+
+  /**
+   * Set current container version.
+   * @param currentVersion Current container version.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Unstable
+  public abstract void setCurrentContainerVersion(int currentVersion);
+
+  /**
* Get the {@code UpdateContainerRequest} that was not satisfiable.
* @return UpdateContainerRequest
*/
@@ -89,6 +105,7 @@ public abstract class UpdateContainerError {
   @Override
   public String toString() {
 return "UpdateContainerError{reason=" + getReason() + ", "
++ "currentVersion=" + getCurrentContainerVersion() + ", "
 + "req=" + getUpdateContainerRequest() + "}";
   }
 
@@ -120,6 +137,6 @@ public abstract class UpdateContainerError {
 } else if (!req.equals(other.getUpdateContainerRequest())) {
   return false;
 }
-return true;
+return getCurrentContainerVersion() == other.getCurrentContainerVersion();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index df3c852..c6647c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -78,6 +78,7 @@ message UpdateContainerRequestProto {
 message UpdateContainerErrorProto {
   optional string reason = 1;
   optional UpdateContainerRequestProto update_request = 2;
+  optional int32 current_container_version = 3;
 }
 
 message AllocateRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aaf106fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
 

[Hadoop Wiki] Update of "HowToSetupYourDevelopmentEnvironment" by SteveLoughran

2017-02-14 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "HowToSetupYourDevelopmentEnvironment" page has been changed by 
SteveLoughran:
https://wiki.apache.org/hadoop/HowToSetupYourDevelopmentEnvironment?action=diff=34=35

Comment:
add the details on OSX install, especially protoc setup now that homebrew 1.x 
doesn't support protobuf 2.5

  This page describes how to get your environment setup and is IDE agnostic.
  
  = Requirements =
-  * Java 6 or 7
-  * Maven
+  * Java 7 or 8 (Branch 2) or Java 8 (trunk)
+  * Maven 3.3 or later
   * Your favorite IDE
+  * Protobuf 2.5.0
  
  = Setup Your Development Environment in Linux =
  
- The instructions below talk about how to get an environment setup using the 
command line to build, control source, and test.  These instructions are 
therefore IDE independent.  Take a look at EclipseEnvironment for instructions 
on how to configure Eclipse to build, control source, and test.  If you prefer 
ItelliJ IDEA, then take a look [[HadoopUnderIDEA| here]]
+ The instructions below talk about how to get an environment setup using the 
command line to build, control source, and test.  These instructions are 
therefore IDE independent.  Take a look at EclipseEnvironment for instructions 
on how to configure Eclipse to build, control source, and test.  If you prefer 
IntelliJ IDEA, then take a look [[HadoopUnderIDEA| here]]
  
-  * Choose a good place to put your code.  You will eventually use your source 
code to run Hadoop, so choose wisely. For example ~/code/hadoop.
+  * Choose a good place to put your code.  You will eventually use your source 
code to run Hadoop, so choose wisely. For example {{{~/code/hadoop}}}.
-  * Get the source.  This is documented in HowToContribute.  Put the source in 
~/code/hadoop (or whatever you chose) so that you have 
~/code/hadoop/hadoop-common
+  * Get the source.  This is documented in HowToContribute.  Put the source in 
{{{~/code/hadoop}}} (or whatever you chose) so that you have 
{{{~/code/hadoop/hadoop-common}}}
-  * cd into ''hadoop-common'', or whatever you named the directory
+  * cd into {{{hadoop-common}}}, or whatever you named the directory
-  * attempt to run ''mvn install''
+  * attempt to run {{{mvn install}}} . To build without tests: {{{mvn install 
-DskipTests}}}
*  If you get any strange errors (other than JUnit test failures and 
errors), then consult the ''Build Errors'' section below.
   * follow GettingStartedWithHadoop to learn how to run Hadoop.
*  If you run in to any problems, refer to the ''Runtime Errors'' below, 
along with the troubleshooting document here: TroubleShooting
+ 
+ = Setup Your Development Environment in OSX =
+ 
+ 
+ The Linux instructions match, except that:
+ 
+ XCode is needed for the command line compiler and other tools. 
+ 
+ 
+ protobuf 2.5.0 needs to be built by hand, as macports and homebrew no longer 
ship that version.
+ 
+ Follow the instructions in the building from source 
[[http://sleepythread.blogspot.co.uk/2013/11/installing-protoc-25x-compiler-google.html|Installing
 protoc 2.5.x compiler on mac]] ''but change the URL for the protobuf archive 
to 
[[https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz]]''.
 
+ 
+ To verify that protobuf is correctly installed, the command {{{protoc 
--version}}} must print out the string {{{libprotoc 2.5.0}}}.
+ 
  
  = Run HDFS in pseudo-distributed mode from the dev tree =
  

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11408. The config name of balance bandwidth is out of date. Contributed by Yiqun Lin.

2017-02-14 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 719df99c0 -> 4164a2032


HDFS-11408. The config name of balance bandwidth is out of date. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4164a203
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4164a203
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4164a203

Branch: refs/heads/trunk
Commit: 4164a2032a41e7318749efd0301751eb2b369cdc
Parents: 719df99
Author: Yiqun Lin 
Authored: Tue Feb 14 18:57:20 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 14 18:57:20 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 2 +-
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java| 2 +-
 .../main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java | 2 +-
 .../hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java | 2 +-
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 +-
 .../hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java  | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md  | 2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4164a203/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7b6a4e5..e0ccd62 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2233,7 +2233,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 
   /**
* Requests the namenode to tell all datanodes to use a new, non-persistent
-   * bandwidth value for dfs.balance.bandwidthPerSec.
+   * bandwidth value for dfs.datanode.balance.bandwidthPerSec.
* See {@link ClientProtocol#setBalancerBandwidth(long)}
* for more details.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4164a203/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index e9475d8..30dcfa4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1599,7 +1599,7 @@ public class DistributedFileSystem extends FileSystem {
 
   /**
* Requests the namenode to tell all datanodes to use a new, non-persistent
-   * bandwidth value for dfs.balance.bandwidthPerSec.
+   * bandwidth value for dfs.datanode.balance.bandwidthPerSec.
* The bandwidth parameter is the max number of bytes per second of network
* bandwidth to be used by a datanode during balancing.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4164a203/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 407621b..eaebd6f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -938,7 +938,7 @@ public interface ClientProtocol {
 
   /**
* Tell all datanodes to use a new, non-persistent bandwidth value for
-   * dfs.balance.bandwidthPerSec.
+   * dfs.datanode.balance.bandwidthPerSec.
*
 * @param bandwidth Balancer bandwidth in bytes per second for this datanode.
* @throws IOException

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4164a203/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

hadoop git commit: HDFS-11407. Document the missing usages of OfflineImageViewer processors. Contributed by Yiqun Lin.

2017-02-14 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk 71c23c9fc -> 719df99c0


HDFS-11407. Document the missing usages of OfflineImageViewer processors. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/719df99c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/719df99c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/719df99c

Branch: refs/heads/trunk
Commit: 719df99c05a4e0bc62a7cad6d22c8dbe5d92bde0
Parents: 71c23c9
Author: Yiqun Lin 
Authored: Tue Feb 14 18:48:08 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Feb 14 18:48:08 2017 +0800

--
 .../src/site/markdown/HdfsImageViewer.md| 63 
 1 file changed, 63 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/719df99c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
index b677f6a..10d98b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
@@ -133,6 +133,69 @@ Applying the Offline Image Viewer with XML processor would 
result in the followi
  
...remaining output omitted...
 
+### ReverseXML Processor
+
+ReverseXML processor is the opposite of the XML processor. Users can specify an 
input XML file and an output fsimage file via the -i and -o command-line options.
+
+   bash$ bin/hdfs oiv -p ReverseXML -i fsimage.xml -o fsimage
+
+This will reconstruct an fsimage from an XML file.
+
+### FileDistribution Processor
+
+FileDistribution processor can analyze file sizes in the namespace image. 
Users can specify maxSize (128GB by default) and step (2MB by default) in bytes 
via the -maxSize and -step command-line options.
+
+   bash$ bin/hdfs oiv -p FileDistribution -maxSize maxSize -step size -i 
fsimage -o output
+
+The processor will calculate how many files in the system fall into each 
segment. The output file is formatted as a tab-separated two-column table 
shown as the following output:
+
+   SizeNumFiles
+   4   1
+   12  1
+   16  1
+   20  1
+   totalFiles = 4
+   totalDirectories = 2
+   totalBlocks = 4
+   totalSpace = 48
+   maxFileSize = 21
+
+To make the output result look more readable, users can specify -format option 
in addition.
+
+   bash$ bin/hdfs oiv -p FileDistribution -maxSize maxSize -step size 
-format -i fsimage -o output
+
+This would result in the following output:
+
+   Size Range  NumFiles
+   (0 B, 4 B]  1
+   (8 B, 12 B] 1
+   (12 B, 16 B]1
+   (16 B, 21 B]1
+   totalFiles = 4
+   totalDirectories = 2
+   totalBlocks = 4
+   totalSpace = 48
+   maxFileSize = 21
+
+### Delimited Processor
+
+Delimited processor generates a text representation of the fsimage, with each 
element separated by a delimiter string (\t by default). Users can specify a 
new delimiter string by the -delimiter option.
+
+   bash$ bin/hdfs oiv -p Delimited -delimiter delimiterString -i fsimage 
-o output
+
+In addition, users can specify a temporary dir to cache intermediate result by 
the following command:
+
+   bash$ bin/hdfs oiv -p Delimited -delimiter delimiterString -t 
temporaryDir -i fsimage -o output
+
+If not set, Delimited processor will construct the namespace in memory before 
outputting text. The output result of this processor should be like the 
following output:
+
+   PathReplication ModificationTimeAccessTime  
PreferredBlockSize  BlocksCount FileSizeNSQUOTA DSQUOTA 
Permission  UserNameGroupName
+   /   0   2017-02-13 10:391970-01-01 08:000   
0   0   9223372036854775807 -1  drwxr-xr-x  root
supergroup
+   /dir0   0   2017-02-13 10:391970-01-01 08:000   
0   0   -1  -1  drwxr-xr-x  rootsupergroup
+   /dir0/file0 1   2017-02-13 10:392017-02-13 10:39
134217728   1   1   0   0   -rw-r--r--  root
supergroup
+   /dir0/file1 1   2017-02-13 10:392017-02-13 10:39
134217728   1   1   0   0   -rw-r--r--  root
supergroup
+   /dir0/file2 1   2017-02-13 10:392017-02-13 10:39
134217728   1   1   0   0   -rw-r--r--  root
supergroup
+
 Options
 ---
 


-
To