hadoop git commit: HDFS-11082. Provide replicated EC policy to replicate files. Contributed by SammiChen.

2017-08-16 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 08aaa4b36 -> 96b3a6b97


HDFS-11082. Provide replicated EC policy to replicate files. Contributed by 
SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96b3a6b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96b3a6b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96b3a6b9

Branch: refs/heads/trunk
Commit: 96b3a6b9721e922d33fadc2459b561a85dbf9b8e
Parents: 08aaa4b
Author: Andrew Wang 
Authored: Wed Aug 16 22:17:06 2017 -0700
Committer: Andrew Wang 
Committed: Wed Aug 16 22:17:06 2017 -0700

--
 .../io/erasurecode/ErasureCodeConstants.java|  8 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  3 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  6 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|  6 +-
 .../hdfs/protocol/ErasureCodingPolicy.java  |  5 ++
 .../protocol/SystemErasureCodingPolicies.java   | 14 
 .../namenode/ErasureCodingPolicyManager.java| 13 ++-
 .../server/namenode/FSDirErasureCodingOp.java   | 13 ++-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  2 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 24 +-
 .../src/site/markdown/HDFSErasureCoding.md  | 16 ++--
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 81 ++
 .../hdfs/server/namenode/TestFSImage.java   | 87 
 .../test/resources/testErasureCodingConf.xml| 78 +-
 14 files changed, 331 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b3a6b9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index e0d7946..d3c3b6b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -30,6 +30,7 @@ public final class ErasureCodeConstants {
   public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
   public static final String XOR_CODEC_NAME = "xor";
   public static final String HHXOR_CODEC_NAME = "hhxor";
+  public static final String REPLICATION_CODEC_NAME = "replication";
 
   public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
   RS_CODEC_NAME, 6, 3);
@@ -45,4 +46,11 @@ public final class ErasureCodeConstants {
 
   public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
   RS_CODEC_NAME, 10, 4);
+
+  public static final ECSchema REPLICATION_1_2_SCHEMA = new ECSchema(
+  REPLICATION_CODEC_NAME, 1, 2);
+
+  public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
+  public static final byte REPLICATION_POLICY_ID = (byte) 63;
+  public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b3a6b9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 969522d..47c14e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3044,7 +3044,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
*
* @param src path to get the information for
* @return Returns the policy information if file or directory on the path is
-   * erasure coded, null otherwise
+   * erasure coded, null otherwise. Null will be returned if directory or file
+   * has REPLICATION policy.
* @throws IOException
*/
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96b3a6b9/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 8f82d03..ceec2b3 100644
--- 

hadoop git commit: YARN-6327. Removing queues from CapacitySchedulerQueueManager and ParentQueue should be done with iterator. Contributed by Jonathan Hung.

2017-08-16 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9de332d50 -> fa3094e43


YARN-6327. Removing queues from CapacitySchedulerQueueManager and ParentQueue 
should be done with iterator. Contributed by Jonathan Hung.

(cherry picked from commit 0a3aa40fe7878c939dbf4e6b43466595159ff930)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa3094e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa3094e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa3094e4

Branch: refs/heads/branch-2
Commit: fa3094e43697dbb7a2dda2b43d8c0c57bbec4b54
Parents: 9de332d
Author: Naganarasimha 
Authored: Wed Mar 15 01:22:25 2017 +0530
Committer: Naganarasimha 
Committed: Thu Aug 17 10:42:00 2017 +0530

--
 .../scheduler/capacity/CapacitySchedulerQueueManager.java | 7 +--
 .../resourcemanager/scheduler/capacity/ParentQueue.java   | 6 --
 2 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3094e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index 70b5699..1ceb6fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -351,10 +352,12 @@ public class CapacitySchedulerQueueManager implements 
SchedulerQueueManager<
 existingQueues.put(queueName, queue);
   }
 }
-for (Map.Entry e : existingQueues.entrySet()) {
+for (Iterator> itr = existingQueues.entrySet()
+.iterator(); itr.hasNext();) {
+  Map.Entry e = itr.next();
   String queueName = e.getKey();
   if (!newQueues.containsKey(queueName)) {
-existingQueues.remove(queueName);
+itr.remove();
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa3094e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index b6907c4..2e48000 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -343,10 +343,12 @@ public class ParentQueue extends AbstractCSQueue {
   }
 
   // remove the deleted queue in the refreshed xml.
-  for (Map.Entry e : currentChildQueues.entrySet()) {
+  for (Iterator> itr = currentChildQueues
+  .entrySet().iterator(); itr.hasNext();) {
+Map.Entry e = itr.next();
 String queueName = e.getKey();
 if (!newChildQueues.containsKey(queueName)) {
-  currentChildQueues.remove(queueName);
+  itr.remove();
 }
   }
 


-
To unsubscribe, e-mail: 

hadoop git commit: HDFS-12269. Better to return a Map rather than HashMap in getErasureCodingCodecs. Contributed by Huafeng Wang.

2017-08-16 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk ab051bd42 -> 08aaa4b36


HDFS-12269. Better to return a Map rather than HashMap in 
getErasureCodingCodecs. Contributed by Huafeng Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08aaa4b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08aaa4b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08aaa4b3

Branch: refs/heads/trunk
Commit: 08aaa4b36fab44c3f47878b3c487db3b373ffccf
Parents: ab051bd
Author: Akira Ajisaka 
Authored: Thu Aug 17 13:20:27 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Aug 17 13:20:27 2017 +0900

--
 .../java/org/apache/hadoop/io/erasurecode/CodecRegistry.java| 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java | 2 +-
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java | 3 +--
 .../java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java| 4 ++--
 .../hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java | 5 +++--
 .../ClientNamenodeProtocolServerSideTranslatorPB.java   | 3 +--
 .../hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java   | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 4 ++--
 .../apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java   | 4 ++--
 .../src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java | 3 +--
 .../java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java  | 4 ++--
 11 files changed, 18 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08aaa4b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
index fcf1349..daf91e2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
@@ -176,7 +176,7 @@ public final class CodecRegistry {
* @return a map of all codec names, and their corresponding code list
* separated by ','.
*/
-  public HashMap getCodec2CoderCompactMap() {
+  public Map getCodec2CoderCompactMap() {
 return coderNameCompactMap;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08aaa4b3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 88b273a..969522d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2764,7 +2764,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public HashMap getErasureCodingCodecs() throws IOException {
+  public Map getErasureCodingCodecs() throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("getErasureCodingCodecs")) {
   return namenode.getErasureCodingCodecs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08aaa4b3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index cd368d4..8f82d03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -26,7 +26,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -2585,7 +2584,7 @@ public class DistributedFileSystem extends FileSystem {
* @return all erasure coding codecs and coders supported by this file 
system.
* @throws IOException
*/
-  public HashMap getAllErasureCodingCodecs()
+  public 

hadoop git commit: HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.

2017-08-16 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 c34eaff91 -> e19f93c39


HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e19f93c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e19f93c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e19f93c3

Branch: refs/heads/HDFS-10467
Commit: e19f93c39a5251ff51ced56cbb2a059b1b296941
Parents: c34eaff
Author: Inigo Goiri 
Authored: Wed Aug 16 17:31:37 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:31:37 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs   | 1 -
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java   | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e19f93c3/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index d51a8e2..d122ff7 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -31,7 +31,6 @@ function hadoop_usage
   hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
   hadoop_add_option "--workers" "turn on worker mode"
 
-<<< HEAD
   hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
   hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
   hadoop_add_subcommand "classpath" client "prints the class path needed to 
get the hadoop jar and the required libraries"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e19f93c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index eaaab39..c77d255 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1946,6 +1946,7 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
 }
 long inodeId = 0;
 return new HdfsFileStatus(0, true, 0, 0, modTime, accessTime, permission,
+EnumSet.noneOf(HdfsFileStatus.Flags.class),
 owner, group, new byte[0], DFSUtil.string2Bytes(name), inodeId,
 childrenNum, null, (byte) 0, null);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.

2017-08-16 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 a43d64bdb -> c34eaff91


HDFS-11554. [Documentation] Router-based federation documentation. Contributed 
by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c34eaff9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c34eaff9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c34eaff9

Branch: refs/heads/HDFS-10467
Commit: c34eaff910e52dfda75095334fc3a1806e5bb312
Parents: a43d64b
Author: Inigo Goiri 
Authored: Wed Aug 16 17:23:29 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:23:29 2017 -0700

--
 .../src/site/markdown/HDFSRouterFederation.md   | 170 +++
 .../site/resources/images/routerfederation.png  | Bin 0 -> 24961 bytes
 2 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c34eaff9/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
new file mode 100644
index 000..f094238
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -0,0 +1,170 @@
+
+
+HDFS Router-based Federation
+
+
+
+
+Introduction
+
+
+NameNodes have scalability limits because of the metadata overhead comprised 
of inodes (files and directories) and file blocks, the number of Datanode 
heartbeats, and the number of HDFS RPC client requests.
The common solution is to split the filesystem into smaller subclusters [HDFS 
Federation](./Federation.html) and provide a federated view 
[ViewFs](./ViewFs.html).
+The problem is how to maintain the split of the subclusters (e.g., namespace 
partition), which forces users to connect to multiple subclusters and manage 
the allocation of folders/files to them.
+
+
+Architecture
+
+
+A natural extension to this partitioned federation is to add a layer of 
software responsible for federating the namespaces.
+This extra layer allows users to access any subcluster transparently, lets 
subclusters manage their own block pools independently, and supports 
rebalancing of data across subclusters.
+To accomplish these goals, the federation layer directs block accesses to the 
proper subcluster, maintains the state of the namespaces, and provides 
mechanisms for data rebalancing.
+This layer must be scalable, highly available, and fault tolerant.
+
+This federation layer comprises multiple components.
+The _Router_ component that has the same interface as a NameNode, and forwards 
the client requests to the correct subcluster, based on ground-truth 
information from a State Store.
The _State Store_ combines a remote _Mount Table_ (in the flavor of 
[ViewFs](./ViewFs.html), but shared between clients) and utilization 
(load/capacity) information about the subclusters.
+This approach has the same architecture as [YARN 
federation](../hadoop-yarn/Federation.html).
+
+![Router-based Federation Sequence Diagram | 
width=800](./images/routerfederation.png)
+
+
+### Example flow
+The simplest configuration deploys a Router on each NameNode machine.
+The Router monitors the local NameNode and heartbeats the state to the State 
Store.
+When a regular DFS client contacts any of the Routers to access a file in the 
federated filesystem, the Router checks the Mount Table in the State Store 
(i.e., the local cache) to find out which subcluster contains the file.
+Then it checks the Membership table in the State Store (i.e., the local cache) 
for the NameNode responsible for the subcluster.
+After it has identified the correct NameNode, the Router proxies the request.
+The client accesses Datanodes directly.
+
+
+### Router
+There can be multiple Routers in the system with soft state.
+Each Router has two roles:
+
+* Federated interface: expose a single, global NameNode interface to the 
clients and forward the requests to the active NameNode in the correct 
subcluster
+* NameNode heartbeat: maintain the information about a NameNode in the State 
Store
+
+ Federated interface
+The Router receives a client request, checks the State Store for the correct 
subcluster, and forwards the request to the active NameNode of that subcluster.
+The reply from the NameNode then flows in the opposite direction.
+The Routers are stateless and can be behind a load balancer.
+For performance, the Router also caches remote mount table entries and the 
state of the subclusters.
+To make sure that changes have been propagated to all Routers, each Router 
heartbeats its state to the State 

[20/24] hadoop git commit: HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aef09267
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aef09267
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aef09267

Branch: refs/heads/HDFS-10467
Commit: aef09267868f5df884f6beadfb093679f35ef1ad
Parents: 59b9d60
Author: Inigo Goiri 
Authored: Tue Aug 1 14:40:27 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:35 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  38 ++
 .../resolver/NamenodeStatusReport.java  | 193 ++
 .../federation/router/FederationUtil.java   |  66 
 .../router/NamenodeHeartbeatService.java| 350 +++
 .../hdfs/server/federation/router/Router.java   | 112 ++
 .../src/main/resources/hdfs-default.xml |  32 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   8 +
 .../hdfs/server/federation/MockResolver.java|   9 +-
 .../server/federation/RouterConfigBuilder.java  |  22 ++
 .../server/federation/RouterDFSCluster.java |  43 +++
 .../router/TestNamenodeHeartbeat.java   | 168 +
 .../server/federation/router/TestRouter.java|   3 +
 13 files changed, 1057 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aef09267/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d7c2d18..acd4790 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1129,6 +1129,20 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_ROUTER_PREFIX + "rpc.enable";
   public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
 
+  // HDFS Router heartbeat
+  public static final String DFS_ROUTER_HEARTBEAT_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "heartbeat.enable";
+  public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS =
+  FEDERATION_ROUTER_PREFIX + "heartbeat.interval";
+  public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT =
+  TimeUnit.SECONDS.toMillis(5);
+  public static final String DFS_ROUTER_MONITOR_NAMENODE =
+  FEDERATION_ROUTER_PREFIX + "monitor.namenode";
+  public static final String DFS_ROUTER_MONITOR_LOCAL_NAMENODE =
+  FEDERATION_ROUTER_PREFIX + "monitor.localnamenode.enable";
+  public static final boolean DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT = true;
+
   // HDFS Router NN client
   public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
   FEDERATION_ROUTER_PREFIX + "connection.pool-size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aef09267/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 47e1c0d..0ea5e3e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1237,6 +1237,44 @@ public class DFSUtil {
   }
 
   /**
+   * Map a logical namenode ID to its web address. Use the given nameservice if
+   * specified, or the configured one if none is given.
+   *
+   * @param conf Configuration
+   * @param nsId which nameservice nnId is a part of, optional
+   * @param nnId the namenode ID to get the service addr for
+   * @return the service addr, null if it could not be determined
+   */
+  public static String getNamenodeWebAddr(final Configuration conf, String 
nsId,
+  String nnId) {
+
+if (nsId == null) {
+  nsId = getOnlyNameServiceIdOrNull(conf);
+}
+
+String webAddrKey = DFSUtilClient.concatSuffixes(
+DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nsId, nnId);
+
+String webAddr =
+conf.get(webAddrKey, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT);
+return webAddr;
+  }
+
+  /**
+   * Get all of the Web addresses of the individual NNs in a given nameservice.
+   *
+   * @param conf Configuration
+   * @param nsId the 

[22/24] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a43d64bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a43d64bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a43d64bd

Branch: refs/heads/HDFS-10467
Commit: a43d64bdb3992a854adf9adfa7b6e42203cf9722
Parents: 31f8779
Author: Inigo Goiri 
Authored: Tue Aug 8 14:44:43 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:35 2017 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 +
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   |   7 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  19 ++
 .../hdfs/protocolPB/RouterAdminProtocolPB.java  |  44 +++
 ...uterAdminProtocolServerSideTranslatorPB.java | 151 
 .../RouterAdminProtocolTranslatorPB.java| 150 
 .../resolver/MembershipNamenodeResolver.java|  34 +-
 .../hdfs/server/federation/router/Router.java   |  52 +++
 .../federation/router/RouterAdminServer.java| 183 ++
 .../server/federation/router/RouterClient.java  |  76 +
 .../hdfs/tools/federation/RouterAdmin.java  | 341 +++
 .../hdfs/tools/federation/package-info.java |  28 ++
 .../src/main/proto/RouterProtocol.proto |  47 +++
 .../src/main/resources/hdfs-default.xml |  46 +++
 .../server/federation/RouterConfigBuilder.java  |  26 ++
 .../server/federation/RouterDFSCluster.java |  43 ++-
 .../server/federation/StateStoreDFSCluster.java | 148 
 .../federation/router/TestRouterAdmin.java  | 261 ++
 19 files changed, 1644 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43d64bd/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 81e5fdf..360aeae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -332,6 +332,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   editlog.proto
   fsimage.proto
   FederationProtocol.proto
+  RouterProtocol.proto
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43d64bd/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index b1f44a4..d51a8e2 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -31,6 +31,7 @@ function hadoop_usage
   hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
   hadoop_add_option "--workers" "turn on worker mode"
 
+<<< HEAD
   hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
   hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
   hadoop_add_subcommand "classpath" client "prints the class path needed to 
get the hadoop jar and the required libraries"
@@ -42,6 +43,7 @@ function hadoop_usage
   hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among 
disks on a given node"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment 
variables"
   hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
+  hadoop_add_subcommand "federation" admin "manage Router-based federation"
   hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the 
NameNode"
   hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
   hadoop_add_subcommand "getconf" client "get config values from configuration"
@@ -181,6 +183,9 @@ function hdfscmd_case
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
   HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router'
 ;;
+federation)
+  HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
+;;
 secondarynamenode)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
   
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43d64bd/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index b9853d6..53bdf70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ 

[05/24] hadoop git commit: MAPREDUCE-6936. Remove unnecessary dependency of hadoop-yarn-server-common from hadoop-mapreduce-client-common (haibochen via rkanter)

2017-08-16 Thread inigoiri
MAPREDUCE-6936. Remove unnecessary dependency of hadoop-yarn-server-common from 
hadoop-mapreduce-client-common (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab051bd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab051bd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab051bd4

Branch: refs/heads/HDFS-10467
Commit: ab051bd42ee1d7c4d3b7cc71e6b2734a0955e767
Parents: 0acc5e0
Author: Robert Kanter 
Authored: Wed Aug 16 16:14:04 2017 -0700
Committer: Robert Kanter 
Committed: Wed Aug 16 16:14:04 2017 -0700

--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab051bd4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index db8ae49..b88b012 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -46,10 +46,6 @@
   org.apache.hadoop
   hadoop-mapreduce-client-core
 
-
-  org.apache.hadoop
-  hadoop-yarn-server-common
-
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/24] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/59b9d608/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
new file mode 100644
index 000..1f0d556
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
+
+/**
+ * API response for overriding an existing namenode registration in the state
+ * store.
+ */
+public abstract class UpdateNamenodeRegistrationResponse {
+
+  public static UpdateNamenodeRegistrationResponse newInstance() {
+return StateStoreSerializer.newRecord(
+UpdateNamenodeRegistrationResponse.class);
+  }
+
+  public static UpdateNamenodeRegistrationResponse newInstance(boolean status)
+  throws IOException {
+UpdateNamenodeRegistrationResponse response = newInstance();
+response.setResult(status);
+return response;
+  }
+
+  @Private
+  @Unstable
+  public abstract boolean getResult();
+
+  @Private
+  @Unstable
+  public abstract void setResult(boolean result);
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59b9d608/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
new file mode 100644
index 000..baad113
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+
+import org.apache.commons.codec.binary.Base64;
+
+import com.google.protobuf.GeneratedMessage;
+import com.google.protobuf.Message;
+import com.google.protobuf.Message.Builder;
+import com.google.protobuf.MessageOrBuilder;
+
+/**
+ * Helper class for setting/getting data elements in an object backed by a
+ * protobuf implementation.
+ */
+public class FederationProtocolPBTranslator {
+
+  /** Optional proto byte stream used to create this object. */
+  private P proto;
+  /** The class of the proto handler for this 

[03/24] hadoop git commit: YARN-6900. ZooKeeper based implementation of the FederationStateStore. (Íñigo Goiri via Subru).

2017-08-16 Thread inigoiri
YARN-6900. ZooKeeper based implementation of the FederationStateStore. (Íñigo 
Goiri via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de462da0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de462da0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de462da0

Branch: refs/heads/HDFS-10467
Commit: de462da04e167a04b89ecf0f40d464cf39dc6549
Parents: 1455306
Author: Subru Krishnan 
Authored: Wed Aug 16 11:43:24 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Aug 16 11:43:24 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   8 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../hadoop-yarn-server-common/pom.xml   |   5 +
 .../impl/ZookeeperFederationStateStore.java | 634 +++
 .../impl/TestZookeeperFederationStateStore.java |  89 +++
 .../TestFederationStateStoreFacadeRetry.java|  20 +-
 .../src/site/markdown/Federation.md |  56 +-
 7 files changed, 785 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acaef8..8515e0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2629,6 +2629,14 @@ public class YarnConfiguration extends Configuration {
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = "";
 
+  public static final String FEDERATION_STATESTORE_ZK_PREFIX =
+  FEDERATION_PREFIX + "zk-state-store.";
+  /** Parent znode path under which ZKRMStateStore will create znodes. */
+  public static final String FEDERATION_STATESTORE_ZK_PARENT_PATH =
+  FEDERATION_STATESTORE_ZK_PREFIX + "parent-path";
+  public static final String DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH =
+  "/federationstore";
+
   private static final String FEDERATION_STATESTORE_SQL_PREFIX =
   FEDERATION_PREFIX + "state-store.sql.";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 91a8b0a..c40c2c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -96,6 +96,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS);
 
+// Federation StateStore ZK implementation configs to be ignored
+configurationPropsToSkipCompare.add(
+YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH);
+
 // Federation StateStore SQL implementation configs to be ignored
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 441a574..e8d3880 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -130,6 +130,11 @@
 
   
 
+
+  org.apache.curator
+  curator-test
+  test
+
   
 
   


[21/24] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43d64bd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
new file mode 100644
index 000..170247f
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.Time;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * The administrator interface of the {@link Router} implemented by
+ * {@link RouterAdminServer}.
+ */
+public class TestRouterAdmin {
+
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  public static final String RPC_BEAN =
+  "Hadoop:service=Router,name=FederationRPC";
+  private static List mockMountTable;
+  private static StateStoreService stateStore;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+cluster = new StateStoreDFSCluster(false, 1);
+// Build and start a router with State Store + admin + RPC
+Configuration conf = new RouterConfigBuilder()
+.stateStore()
+.admin()
+.rpc()
+.build();
+cluster.addRouterOverrides(conf);
+cluster.startRouters();
+routerContext = cluster.getRandomRouter();
+mockMountTable = cluster.generateMockMountTable();
+Router router = routerContext.getRouter();
+stateStore = router.getStateStore();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+cluster.stopRouter(routerContext);
+  }
+
+  @Before
+  public void testSetup() throws Exception {
+assertTrue(
+synchronizeRecords(stateStore, mockMountTable, MountTable.class));
+  }
+
+  @Test
+  public void testAddMountTable() throws IOException {
+MountTable newEntry = MountTable.newInstance(
+"/testpath", Collections.singletonMap("ns0", "/testdir"),
+Time.now(), Time.now());
+
+RouterClient client = routerContext.getAdminClient();
+MountTableManager mountTable = client.getMountTableManager();
+
+// Existing mount table size
+List records = 

[07/24] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace 
and Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d91628b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d91628b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d91628b8

Branch: refs/heads/HDFS-10467
Commit: d91628b880ffdd9bb1079a35133fc0b8ce5c3593
Parents: 3a6f78d
Author: Inigo Goiri 
Authored: Tue May 2 15:49:53 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:34 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../federation/router/PeriodicService.java  | 198 
 .../StateStoreConnectionMonitorService.java |  67 +++
 .../federation/store/StateStoreService.java | 152 +-
 .../federation/store/StateStoreUtils.java   |  51 +-
 .../store/driver/StateStoreDriver.java  |  31 +-
 .../driver/StateStoreRecordOperations.java  |  17 +-
 .../store/driver/impl/StateStoreBaseImpl.java   |  31 +-
 .../driver/impl/StateStoreFileBaseImpl.java | 429 
 .../store/driver/impl/StateStoreFileImpl.java   | 161 +++
 .../driver/impl/StateStoreFileSystemImpl.java   | 178 +++
 .../driver/impl/StateStoreSerializableImpl.java |  77 +++
 .../federation/store/records/BaseRecord.java|  20 +-
 .../server/federation/store/records/Query.java  |  66 +++
 .../src/main/resources/hdfs-default.xml |  16 +
 .../store/FederationStateStoreTestUtils.java| 232 +
 .../store/driver/TestStateStoreDriverBase.java  | 483 +++
 .../store/driver/TestStateStoreFile.java|  64 +++
 .../store/driver/TestStateStoreFileSystem.java  |  88 
 19 files changed, 2329 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d91628b8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 320e1f3..2b6d0e8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -25,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;
 
@@ -1119,6 +1123,16 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
   StateStoreSerializerPBImpl.class;
 
+  public static final String FEDERATION_STORE_DRIVER_CLASS =
+  FEDERATION_STORE_PREFIX + "driver.class";
+  public static final Class
+  FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+
+  public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
+  FEDERATION_STORE_PREFIX + "connection.test";
+  public static final long FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT =
+  TimeUnit.MINUTES.toMillis(1);
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d91628b8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java
new file mode 100644
index 000..5e1
--- /dev/null
+++ 

[01/24] hadoop git commit: YARN-6965. Duplicate instantiation in FairSchedulerQueueInfo. Contributed by Masahiro Tanaka. [Forced Update!]

2017-08-16 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 3966f9b9f -> a43d64bdb (forced update)


YARN-6965. Duplicate instantiation in FairSchedulerQueueInfo. Contributed by 
Masahiro Tanaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/588c190a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/588c190a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/588c190a

Branch: refs/heads/HDFS-10467
Commit: 588c190afd49bdbd5708f7805bf6c68f09fee142
Parents: 75dd866
Author: Akira Ajisaka 
Authored: Wed Aug 16 14:06:22 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 16 14:06:22 2017 +0900

--
 .../server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java   | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/588c190a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index a4607c2..79339c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -99,7 +99,6 @@ public class FairSchedulerQueueInfo {
 steadyFairResources = new ResourceInfo(queue.getSteadyFairShare());
 fairResources = new ResourceInfo(queue.getFairShare());
 minResources = new ResourceInfo(queue.getMinShare());
-maxResources = new ResourceInfo(queue.getMaxShare());
 maxResources = new ResourceInfo(
 Resources.componentwiseMin(queue.getMaxShare(),
 scheduler.getClusterResource()));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/24] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo 
Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8854913c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8854913c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8854913c

Branch: refs/heads/HDFS-10467
Commit: 8854913cae4b554a83d158c71ae29e16c16e7778
Parents: d91628b
Author: Inigo Goiri 
Authored: Thu May 11 09:57:03 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:34 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   38 +
 .../resolver/FederationNamespaceInfo.java   |   46 +-
 .../federation/resolver/RemoteLocation.java |   46 +-
 .../federation/router/ConnectionContext.java|  104 +
 .../federation/router/ConnectionManager.java|  408 
 .../federation/router/ConnectionPool.java   |  314 +++
 .../federation/router/ConnectionPoolId.java |  117 ++
 .../router/RemoteLocationContext.java   |   38 +-
 .../server/federation/router/RemoteMethod.java  |  164 ++
 .../server/federation/router/RemoteParam.java   |   71 +
 .../hdfs/server/federation/router/Router.java   |   58 +-
 .../federation/router/RouterRpcClient.java  |  856 
 .../federation/router/RouterRpcServer.java  | 1867 +-
 .../src/main/resources/hdfs-default.xml |   95 +
 .../server/federation/FederationTestUtils.java  |   80 +-
 .../hdfs/server/federation/MockResolver.java|   90 +-
 .../server/federation/RouterConfigBuilder.java  |   20 +-
 .../server/federation/RouterDFSCluster.java |  535 +++--
 .../server/federation/router/TestRouter.java|   31 +-
 .../server/federation/router/TestRouterRpc.java |  869 
 .../router/TestRouterRpcMultiDestination.java   |  216 ++
 21 files changed, 5675 insertions(+), 388 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8854913c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 2b6d0e8..ca24fd5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1102,6 +1102,44 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   // HDFS Router-based federation
   public static final String FEDERATION_ROUTER_PREFIX =
   "dfs.federation.router.";
+  public static final String DFS_ROUTER_DEFAULT_NAMESERVICE =
+  FEDERATION_ROUTER_PREFIX + "default.nameserviceId";
+  public static final String DFS_ROUTER_HANDLER_COUNT_KEY =
+  FEDERATION_ROUTER_PREFIX + "handler.count";
+  public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10;
+  public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY =
+  FEDERATION_ROUTER_PREFIX + "reader.queue.size";
+  public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_READER_COUNT_KEY =
+  FEDERATION_ROUTER_PREFIX + "reader.count";
+  public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1;
+  public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY =
+  FEDERATION_ROUTER_PREFIX + "handler.queue.size";
+  public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_RPC_BIND_HOST_KEY =
+  FEDERATION_ROUTER_PREFIX + "rpc-bind-host";
+  public static final int DFS_ROUTER_RPC_PORT_DEFAULT = ;
+  public static final String DFS_ROUTER_RPC_ADDRESS_KEY =
+  FEDERATION_ROUTER_PREFIX + "rpc-address";
+  public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT =
+  "0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT;
+  public static final String DFS_ROUTER_RPC_ENABLE =
+  FEDERATION_ROUTER_PREFIX + "rpc.enable";
+  public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
+
+  // HDFS Router NN client
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
+  FEDERATION_ROUTER_PREFIX + "connection.pool-size";
+  public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT =
+  64;
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN =
+  FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms";
+  public static final long DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT =
+  TimeUnit.MINUTES.toMillis(1);
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS =
+  FEDERATION_ROUTER_PREFIX + "connection.clean.ms";
+  public 

[04/24] hadoop git commit: MAPREDUCE-6940. Copy-paste error in the TaskAttemptUnsuccessfulCompletionEvent constructor. Contributed by Oleg Danilov

2017-08-16 Thread inigoiri
MAPREDUCE-6940. Copy-paste error in the TaskAttemptUnsuccessfulCompletionEvent 
constructor. Contributed by Oleg Danilov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0acc5e00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0acc5e00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0acc5e00

Branch: refs/heads/HDFS-10467
Commit: 0acc5e00362602f027524637a86ca1bf80982986
Parents: de462da
Author: Jason Lowe 
Authored: Wed Aug 16 16:34:06 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 16:34:06 2017 -0500

--
 .../TaskAttemptUnsuccessfulCompletionEvent.java | 28 ++--
 1 file changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0acc5e00/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
index 1732d91..1752967 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
@@ -60,7 +60,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
   int[] physMemKbytes;
   private static final Counters EMPTY_COUNTERS = new Counters();
 
-  /** 
+  /**
* Create an event to record the unsuccessful completion of attempts
* @param id Attempt ID
* @param taskType Type of the task
@@ -74,7 +74,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
* @param allSplits the "splits", or a pixelated graph of various
*measurable worker node state variables against progress.
*Currently there are four; wallclock time, CPU time,
-   *virtual memory and physical memory.  
+   *virtual memory and physical memory.
*/
   public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
@@ -101,7 +101,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
 ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
   }
 
-  /** 
+  /**
* @deprecated please use the constructor with an additional
*  argument, an array of splits arrays instead.  See
*  {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
@@ -117,19 +117,19 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
*/
   public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
-String status, long finishTime, 
+String status, long finishTime,
 String hostname, String error) {
 this(id, taskType, status, finishTime, hostname, -1, "",
 error, EMPTY_COUNTERS, null);
   }
-  
+
   public TaskAttemptUnsuccessfulCompletionEvent
   (TaskAttemptID id, TaskType taskType,
String status, long finishTime,
String hostname, int port, String rackName,
String error, int[][] allSplits) {
 this(id, taskType, status, finishTime, hostname, port,
-rackName, error, EMPTY_COUNTERS, null);
+rackName, error, EMPTY_COUNTERS, allSplits);
   }
 
   TaskAttemptUnsuccessfulCompletionEvent() {}
@@ -162,9 +162,9 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
 }
 return datum;
   }
-  
-  
-  
+
+
+
   public void setDatum(Object odatum) {
 this.datum =
 (TaskAttemptUnsuccessfulCompletion)odatum;
@@ -208,12 +208,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
   public String getHostname() { return hostname; }
   /** Get the rpc port for the host where the attempt executed */
   public int getPort() { return port; }
-  
+
   /** Get the rack name of the node where the attempt ran */
   public String getRackName() {
 return rackName == null ? null : rackName.toString();
   }
-  
+
   /** Get the error string */
   public String getError() { return error.toString(); }
   /** Get the task status */
@@ -224,12 +224,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements 

[17/24] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-10687. Federation Membership State Store internal API. Contributed by 
Jason Kace and Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59b9d608
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59b9d608
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59b9d608

Branch: refs/heads/HDFS-10467
Commit: 59b9d608ec71059dfc18a805f68ee66a7e997cb0
Parents: 58b6088
Author: Inigo Goiri 
Authored: Mon Jul 31 10:55:21 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:34 2017 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   3 +
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   1 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +-
 .../resolver/MembershipNamenodeResolver.java| 290 
 .../federation/router/FederationUtil.java   |  42 +-
 .../federation/store/CachedRecordStore.java | 237 ++
 .../federation/store/MembershipStore.java   | 126 +
 .../federation/store/StateStoreCache.java   |  36 ++
 .../store/StateStoreCacheUpdateService.java |  67 +++
 .../federation/store/StateStoreService.java | 202 +++-
 .../store/impl/MembershipStoreImpl.java | 311 +
 .../federation/store/impl/package-info.java |  31 ++
 .../GetNamenodeRegistrationsRequest.java|  52 +++
 .../GetNamenodeRegistrationsResponse.java   |  55 +++
 .../store/protocol/GetNamespaceInfoRequest.java |  30 ++
 .../protocol/GetNamespaceInfoResponse.java  |  52 +++
 .../protocol/NamenodeHeartbeatRequest.java  |  52 +++
 .../protocol/NamenodeHeartbeatResponse.java |  49 ++
 .../UpdateNamenodeRegistrationRequest.java  |  72 +++
 .../UpdateNamenodeRegistrationResponse.java |  51 ++
 .../impl/pb/FederationProtocolPBTranslator.java | 145 ++
 .../GetNamenodeRegistrationsRequestPBImpl.java  |  87 
 .../GetNamenodeRegistrationsResponsePBImpl.java |  99 
 .../impl/pb/GetNamespaceInfoRequestPBImpl.java  |  60 +++
 .../impl/pb/GetNamespaceInfoResponsePBImpl.java |  95 
 .../impl/pb/NamenodeHeartbeatRequestPBImpl.java |  93 
 .../pb/NamenodeHeartbeatResponsePBImpl.java |  71 +++
 ...UpdateNamenodeRegistrationRequestPBImpl.java |  95 
 ...pdateNamenodeRegistrationResponsePBImpl.java |  73 +++
 .../store/protocol/impl/pb/package-info.java|  29 ++
 .../store/records/MembershipState.java  | 329 +
 .../store/records/MembershipStats.java  | 126 +
 .../records/impl/pb/MembershipStatePBImpl.java  | 334 +
 .../records/impl/pb/MembershipStatsPBImpl.java  | 191 
 .../src/main/proto/FederationProtocol.proto | 107 +
 .../src/main/resources/hdfs-default.xml |  18 +-
 .../resolver/TestNamenodeResolver.java  | 284 
 .../store/FederationStateStoreTestUtils.java|  23 +-
 .../federation/store/TestStateStoreBase.java|  81 
 .../store/TestStateStoreMembershipState.java| 463 +++
 .../store/driver/TestStateStoreDriverBase.java  |  69 ++-
 .../store/records/TestMembershipState.java  | 129 ++
 42 files changed, 4745 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59b9d608/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 9582fcb..4b958b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -15,6 +15,9 @@

  
  
+   
+ 
+ 

  
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59b9d608/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index fa1044d..81e5fdf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -331,6 +331,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   QJournalProtocol.proto
   editlog.proto
   fsimage.proto
+  FederationProtocol.proto
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59b9d608/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 

[19/24] hadoop git commit: HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and 
Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a6f78d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a6f78d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a6f78d0

Branch: refs/heads/HDFS-10467
Commit: 3a6f78d01bad838ab863917d6d8ade3609bcda5f
Parents: 3d0b38a
Author: Inigo 
Authored: Thu Apr 6 19:18:52 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:34 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  11 ++
 .../server/federation/store/RecordStore.java| 100 
 .../store/driver/StateStoreSerializer.java  | 119 +++
 .../driver/impl/StateStoreSerializerPBImpl.java | 115 ++
 .../store/records/impl/pb/PBRecord.java |  47 
 .../store/records/impl/pb/package-info.java |  29 +
 .../src/main/resources/hdfs-default.xml |   8 ++
 7 files changed, 429 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a6f78d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0eb42ce..320e1f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -1108,6 +1109,16 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT 
=
   "org.apache.hadoop.hdfs.server.federation.MockResolver";
 
+  // HDFS Router-based federation State Store
+  public static final String FEDERATION_STORE_PREFIX =
+  FEDERATION_ROUTER_PREFIX + "store.";
+
+  public static final String FEDERATION_STORE_SERIALIZER_CLASS =
+  DFSConfigKeys.FEDERATION_STORE_PREFIX + "serializer";
+  public static final Class
+  FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
+  StateStoreSerializerPBImpl.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a6f78d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
new file mode 100644
index 000..524f432
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.lang.reflect.Constructor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import 

[14/24] hadoop git commit: HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and 
Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d0b38ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d0b38ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d0b38ac

Branch: refs/heads/HDFS-10467
Commit: 3d0b38ac7377fa47ed41049ba8877dc07c671d5e
Parents: 1b22682
Author: Inigo 
Authored: Wed Mar 29 19:35:06 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:34 2017 -0700

--
 .../store/StateStoreUnavailableException.java   |  33 
 .../federation/store/StateStoreUtils.java   |  72 +++
 .../store/driver/StateStoreDriver.java  | 172 +
 .../driver/StateStoreRecordOperations.java  | 164 
 .../store/driver/impl/StateStoreBaseImpl.java   |  69 +++
 .../store/driver/impl/package-info.java |  39 
 .../federation/store/driver/package-info.java   |  37 
 .../federation/store/protocol/package-info.java |  31 +++
 .../federation/store/records/BaseRecord.java| 189 +++
 .../federation/store/records/QueryResult.java   |  56 ++
 .../federation/store/records/package-info.java  |  36 
 11 files changed, 898 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d0b38ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
new file mode 100644
index 000..4e6f8c8
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.io.IOException;
+
+/**
+ * Thrown when the state store is not reachable or available. Cached APIs and
+ * queries may succeed. Client should retry again later.
+ */
+public class StateStoreUnavailableException extends IOException {
+
+  private static final long serialVersionUID = 1L;
+
+  public StateStoreUnavailableException(String msg) {
+super(msg);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d0b38ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
new file mode 100644
index 000..8c681df
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language 

[09/24] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8854913c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 24792bb..4bae71e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -17,16 +17,109 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY;
+
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AddBlockFlag;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.inotify.EventBatchList;
+import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
+import 
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 

[08/24] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8854913c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index ee6f57d..2875750 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.util.Time;
 
 /**
  * In-memory cache/mock of a namenode and file resolver. Stores the most
- * recently updated NN information for each nameservice and block pool. Also
+ * recently updated NN information for each nameservice and block pool. It also
  * stores a virtual mount table for resolving global namespace paths to local 
NN
  * paths.
  */
@@ -51,82 +51,93 @@ public class MockResolver
 implements ActiveNamenodeResolver, FileSubclusterResolver {
 
   private Map resolver =
-  new HashMap();
-  private Map locations =
-  new HashMap();
-  private Set namespaces =
-  new HashSet();
+  new HashMap<>();
+  private Map locations = new HashMap<>();
+  private Set namespaces = new HashSet<>();
   private String defaultNamespace = null;
 
+
   public MockResolver(Configuration conf, StateStoreService store) {
 this.cleanRegistrations();
   }
 
-  public void addLocation(String mount, String nameservice, String location) {
-RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
-List locationsList = locations.get(mount);
+  public void addLocation(String mount, String nsId, String location) {
+List locationsList = this.locations.get(mount);
 if (locationsList == null) {
-  locationsList = new LinkedList();
-  locations.put(mount, locationsList);
+  locationsList = new LinkedList<>();
+  this.locations.put(mount, locationsList);
 }
+
+final RemoteLocation remoteLocation = new RemoteLocation(nsId, location);
 if (!locationsList.contains(remoteLocation)) {
   locationsList.add(remoteLocation);
 }
 
 if (this.defaultNamespace == null) {
-  this.defaultNamespace = nameservice;
+  this.defaultNamespace = nsId;
 }
   }
 
   public synchronized void cleanRegistrations() {
-this.resolver =
-new HashMap();
-this.namespaces = new HashSet();
+this.resolver = new HashMap<>();
+this.namespaces = new HashSet<>();
   }
 
   @Override
   public void updateActiveNamenode(
-  String ns, InetSocketAddress successfulAddress) {
+  String nsId, InetSocketAddress successfulAddress) {
 
 String address = successfulAddress.getHostName() + ":" +
 successfulAddress.getPort();
-String key = ns;
+String key = nsId;
 if (key != null) {
   // Update the active entry
   @SuppressWarnings("unchecked")
-  List iterator =
-  (List) resolver.get(key);
-  for (FederationNamenodeContext namenode : iterator) {
+  List namenodes =
+  (List) this.resolver.get(key);
+  for (FederationNamenodeContext namenode : namenodes) {
 if (namenode.getRpcAddress().equals(address)) {
   MockNamenodeContext nn = (MockNamenodeContext) namenode;
   nn.setState(FederationNamenodeServiceState.ACTIVE);
   break;
 }
   }
-  Collections.sort(iterator, new NamenodePriorityComparator());
+  // This operation modifies the list so we need to be careful
+  synchronized(namenodes) {
+Collections.sort(namenodes, new NamenodePriorityComparator());
+  }
 }
   }
 
   @Override
   public List
   getNamenodesForNameserviceId(String nameserviceId) {
-return resolver.get(nameserviceId);
+// Return a copy of the list because it is updated periodically
+List namenodes =
+this.resolver.get(nameserviceId);
+return Collections.unmodifiableList(new ArrayList<>(namenodes));
   }
 
   @Override
   public List getNamenodesForBlockPoolId(
   String blockPoolId) {
-return resolver.get(blockPoolId);
+// Return a copy of the list because it is updated periodically
+List namenodes =
+this.resolver.get(blockPoolId);
+return Collections.unmodifiableList(new ArrayList<>(namenodes));
   }
 
   private static class MockNamenodeContext
   implements FederationNamenodeContext {
+
+private String namenodeId;
+private String nameserviceId;
+
 private String webAddress;
 private String rpcAddress;
 private String serviceAddress;
 private String lifelineAddress;
-private String namenodeId;
-private String 

[24/24] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-10880. Federation Mount Table State Store internal API. Contributed by 
Jason Kace and Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31f87792
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31f87792
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31f87792

Branch: refs/heads/HDFS-10467
Commit: 31f877922859f876ef33705a4411da1c9f399306
Parents: aef0926
Author: Inigo Goiri 
Authored: Fri Aug 4 18:00:12 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:35 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   7 +-
 .../federation/resolver/MountTableManager.java  |  80 +++
 .../federation/resolver/MountTableResolver.java | 544 +++
 .../federation/resolver/PathLocation.java   | 124 -
 .../resolver/order/DestinationOrder.java|  29 +
 .../federation/resolver/order/package-info.java |  29 +
 .../federation/router/FederationUtil.java   |  56 +-
 .../hdfs/server/federation/router/Router.java   |   3 +-
 .../federation/store/MountTableStore.java   |  49 ++
 .../federation/store/StateStoreService.java |   2 +
 .../store/impl/MountTableStoreImpl.java | 116 
 .../protocol/AddMountTableEntryRequest.java |  47 ++
 .../protocol/AddMountTableEntryResponse.java|  42 ++
 .../protocol/GetMountTableEntriesRequest.java   |  49 ++
 .../protocol/GetMountTableEntriesResponse.java  |  53 ++
 .../protocol/RemoveMountTableEntryRequest.java  |  49 ++
 .../protocol/RemoveMountTableEntryResponse.java |  42 ++
 .../protocol/UpdateMountTableEntryRequest.java  |  51 ++
 .../protocol/UpdateMountTableEntryResponse.java |  43 ++
 .../pb/AddMountTableEntryRequestPBImpl.java |  84 +++
 .../pb/AddMountTableEntryResponsePBImpl.java|  76 +++
 .../pb/GetMountTableEntriesRequestPBImpl.java   |  76 +++
 .../pb/GetMountTableEntriesResponsePBImpl.java  | 104 
 .../pb/RemoveMountTableEntryRequestPBImpl.java  |  76 +++
 .../pb/RemoveMountTableEntryResponsePBImpl.java |  76 +++
 .../pb/UpdateMountTableEntryRequestPBImpl.java  |  96 
 .../pb/UpdateMountTableEntryResponsePBImpl.java |  76 +++
 .../federation/store/records/MountTable.java| 301 ++
 .../store/records/impl/pb/MountTablePBImpl.java | 213 
 .../src/main/proto/FederationProtocol.proto |  61 ++-
 .../hdfs/server/federation/MockResolver.java|   9 +-
 .../resolver/TestMountTableResolver.java| 396 ++
 .../store/FederationStateStoreTestUtils.java|  16 +
 .../store/TestStateStoreMountTable.java | 250 +
 .../store/driver/TestStateStoreDriverBase.java  |  12 +
 .../store/records/TestMountTable.java   | 176 ++
 36 files changed, 3437 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f87792/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index acd4790..f156fdb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
@@ -1160,8 +1162,9 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   // HDFS Router State Store connection
   public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =
   FEDERATION_ROUTER_PREFIX + "file.resolver.client.class";
-  public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT =
-  "org.apache.hadoop.hdfs.server.federation.MockResolver";
+  public static final Class
+  FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT =
+  MountTableResolver.class;
   public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS =
   

[15/24] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/59b9d608/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
new file mode 100644
index 000..2d74505
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.resolver;
+
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES;
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.ROUTERS;
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
+import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyException;
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.clearRecords;
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.newStateStore;
+import static 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.waitStateStore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import 
org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException;
+import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test the basic {@link ActiveNamenodeResolver} functionality.
+ */
+public class TestNamenodeResolver {
+
+  private static StateStoreService stateStore;
+  private static ActiveNamenodeResolver namenodeResolver;
+
+  @BeforeClass
+  public static void create() throws Exception {
+
+Configuration conf = getStateStoreConfiguration();
+
+// Reduce expirations to 5 seconds
+conf.setLong(
+DFSConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS,
+TimeUnit.SECONDS.toMillis(5));
+
+stateStore = newStateStore(conf);
+assertNotNull(stateStore);
+
+namenodeResolver = new MembershipNamenodeResolver(conf, stateStore);
+namenodeResolver.setRouterId(ROUTERS[0]);
+  }
+
+  @AfterClass
+  public static void destroy() throws Exception {
+stateStore.stop();
+stateStore.close();
+  }
+
+  @Before
+  public void setup() throws IOException, InterruptedException {
+// Wait for state store to connect
+stateStore.loadDriver();
+waitStateStore(stateStore, 1);
+
+// Clear NN registrations
+boolean cleared = clearRecords(stateStore, MembershipState.class);
+assertTrue(cleared);
+  }
+
+  @Test
+  public void testStateStoreDisconnected() throws Exception {
+
+// Add an entry to the store
+NamenodeStatusReport report = createNamenodeReport(
+NAMESERVICES[0], NAMENODES[0], HAServiceState.ACTIVE);
+assertTrue(namenodeResolver.registerNamenode(report));
+
+// Close the data store driver
+stateStore.closeDriver();
+assertFalse(stateStore.isDriverReady());
+
+// Flush the caches
+stateStore.refreshCaches(true);
+
+// Verify commands 

[10/24] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8854913c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
new file mode 100644
index 000..3a32be1
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -0,0 +1,856 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * A client proxy for Router -> NN communication using the NN ClientProtocol.
+ * 
+ * Provides routers to invoke remote ClientProtocol methods and handle
+ * retries/failover.
+ * 
+ * invokeSingle Make a single request to a single namespace
+ * invokeSequential Make a sequential series of requests to multiple
+ * ordered namespaces until a condition is met.
+ * invokeConcurrent Make concurrent requests to multiple namespaces and
+ * return all of the results.
+ * 
+ * Also maintains a cached pool of connections to NNs. Connections are managed
+ * by the ConnectionManager and are unique to each user + NN. The size of the
+ * connection pool can be configured. Larger pools allow for more simultaneous
+ * requests to a single NN from a single user.
+ */
+public class RouterRpcClient {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(RouterRpcClient.class);
+
+
+  /** Router identifier. */
+  private final String routerId;
+
+  /** Interface to identify the active NN for a nameservice or blockpool ID. */
+  private final ActiveNamenodeResolver namenodeResolver;
+
+  /** Connection pool to the Namenodes per user for performance. */
+  private final ConnectionManager connectionManager;
+  /** Service to run asynchronous calls. */
+  private final ExecutorService executorService;
+  /** Retry policy for router -> NN communication. */
+  private final RetryPolicy retryPolicy;
+
+  /** Pattern to parse a stack trace line. */
+  private static final Pattern STACK_TRACE_PATTERN =
+  Pattern.compile("\\tat (.*)\\.(.*)\\((.*):(\\d*)\\)");
+
+
+  /**
+   * Create a router RPC 

[13/24] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b226827
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b226827
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b226827

Branch: refs/heads/HDFS-10467
Commit: 1b226827fba3d032da4e6304381900bff9f5151a
Parents: ab051bd
Author: Inigo 
Authored: Tue Mar 28 14:30:59 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:34 2017 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd   |   8 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +
 .../resolver/ActiveNamenodeResolver.java| 117 +++
 .../resolver/FederationNamenodeContext.java |  87 +++
 .../FederationNamenodeServiceState.java |  46 ++
 .../resolver/FederationNamespaceInfo.java   |  99 +++
 .../resolver/FileSubclusterResolver.java|  75 ++
 .../resolver/NamenodePriorityComparator.java|  63 ++
 .../resolver/NamenodeStatusReport.java  | 195 +
 .../federation/resolver/PathLocation.java   | 122 +++
 .../federation/resolver/RemoteLocation.java |  74 ++
 .../federation/resolver/package-info.java   |  41 +
 .../federation/router/FederationUtil.java   | 117 +++
 .../router/RemoteLocationContext.java   |  38 +
 .../hdfs/server/federation/router/Router.java   | 263 +++
 .../federation/router/RouterRpcServer.java  | 102 +++
 .../server/federation/router/package-info.java  |  31 +
 .../federation/store/StateStoreService.java |  77 ++
 .../server/federation/store/package-info.java   |  62 ++
 .../src/main/resources/hdfs-default.xml |  16 +
 .../server/federation/FederationTestUtils.java  | 233 ++
 .../hdfs/server/federation/MockResolver.java| 290 +++
 .../server/federation/RouterConfigBuilder.java  |  40 +
 .../server/federation/RouterDFSCluster.java | 767 +++
 .../server/federation/router/TestRouter.java|  96 +++
 26 files changed, 3080 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b226827/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index e6405b5..b1f44a4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -57,6 +57,7 @@ function hadoop_usage
   hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an 
fsimage"
   hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer 
to a legacy fsimage"
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
+  hadoop_add_subcommand "router" daemon "run the DFS router"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
   hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage 
policies"
@@ -176,6 +177,10 @@ function hdfscmd_case
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
   HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
 ;;
+router)
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router'
+;;
 secondarynamenode)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
   
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b226827/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 2181e47..b9853d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
 )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode 
dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups 
snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath 
crypto router debug
   for %%i in ( %hdfscommands% ) do (
 if %hdfs-command% == %%i set hdfscommand=true

[23/24] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f87792/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
new file mode 100644
index 000..7f7c998
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb;
+
+import java.io.IOException;
+
+import 
org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
+import 
org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProtoOrBuilder;
+import 
org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord;
+
+import com.google.protobuf.Message;
+
+/**
+ * Protobuf implementation of the state store API object
+ * RemoveMountTableEntryRequest.
+ */
+// Thin protobuf-backed implementation: every accessor delegates to the
+// translator, which owns the RemoveMountTableEntryRequestProto message and
+// its builder. NOTE(review): the generic parameters on
+// FederationProtocolPBTranslator appear stripped by the mail archiver --
+// confirm the declared type arguments against the repository source.
+public class RemoveMountTableEntryRequestPBImpl
+extends RemoveMountTableEntryRequest implements PBRecord {
+
+  private FederationProtocolPBTranslator translator =
+  new FederationProtocolPBTranslator(
+  RemoveMountTableEntryRequestProto.class);
+
+  // Default constructor; the translator lazily builds an empty proto.
+  public RemoveMountTableEntryRequestPBImpl() {
+  }
+
+  // Wraps an existing proto message.
+  public RemoveMountTableEntryRequestPBImpl(
+  RemoveMountTableEntryRequestProto proto) {
+this.setProto(proto);
+  }
+
+  // Builds and returns the current proto from the translator's builder.
+  @Override
+  public RemoveMountTableEntryRequestProto getProto() {
+return this.translator.build();
+  }
+
+  @Override
+  public void setProto(Message proto) {
+this.translator.setProto(proto);
+  }
+
+  // Deserializes the record state from a base64-encoded string.
+  @Override
+  public void readInstance(String base64String) throws IOException {
+this.translator.readInstance(base64String);
+  }
+
+  // Source path of the mount table entry to remove.
+  @Override
+  public String getSrcPath() {
+return this.translator.getProtoOrBuilder().getSrcPath();
+  }
+
+  @Override
+  public void setSrcPath(String path) {
+this.translator.getBuilder().setSrcPath(path);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31f87792/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
new file mode 100644
index 000..0c943ac
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 

[02/24] hadoop git commit: YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by Robert Kanter

2017-08-16 Thread inigoiri
YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by 
Robert Kanter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14553061
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14553061
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14553061

Branch: refs/heads/HDFS-10467
Commit: 14553061be0a341df3e628dcaf06717b4630b05e
Parents: 588c190
Author: Jason Lowe 
Authored: Wed Aug 16 13:04:36 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 13:04:36 2017 -0500

--
 .../apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14553061/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index 14df94a..6a063e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -151,13 +151,13 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
YarnClient rmClient = YarnClient.createYarnClient()) {
   Configuration conf = new YarnConfiguration();
   conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500);
+  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 4500);
   // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least
   // RM_AM_EXPIRY_INTERVAL_MS * 1.5 *3
   conf.setInt(
-  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6);
+  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 
20);
   cluster.init(conf);
   cluster.start();
   final Configuration yarnConf = cluster.getConfig();
@@ -198,7 +198,7 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
 lastToken = response.getAMRMToken();
 
 // Time slot to be sure the AMRMProxy renew the token
-Thread.sleep(1500);
+Thread.sleep(4500);
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/24] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d91628b8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
new file mode 100644
index 000..7f0b36a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java
@@ -0,0 +1,483 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store.driver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+import 
org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
+import org.apache.hadoop.hdfs.server.federation.store.records.Query;
+import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
+import org.junit.AfterClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base tests for the driver. The particular implementations will use this to
+ * test their functionality.
+ */
+public class TestStateStoreDriverBase {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStateStoreDriverBase.class);
+
+  private static StateStoreService stateStore;
+  private static Configuration conf;
+
+
+  /**
+   * Get the State Store driver.
+   * @return State Store driver.
+   */
+  protected StateStoreDriver getStateStoreDriver() {
+return stateStore.getDriver();
+  }
+
+  // One-time teardown; null-guarded because getStateStore() may never have
+  // been called (or may have failed) in a subclass.
+  @AfterClass
+  public static void tearDownCluster() {
+if (stateStore != null) {
+  stateStore.stop();
+}
+  }
+
+  /**
+   * Get a new State Store using this configuration.
+   *
+   * The configuration and the created store are kept in static fields so
+   * driver subclasses can use them across tests.
+   *
+   * @param config Configuration for the State Store.
+   * @throws Exception If we cannot get the State Store.
+   */
+  public static void getStateStore(Configuration config) throws Exception {
+conf = config;
+stateStore = FederationStateStoreTestUtils.getStateStore(conf);
+  }
+
+  // Stub: intended to build a fake record of the given class for driver
+  // tests, but not implemented yet -- always returns null (see TODO).
+  // NOTE(review): the type-parameter declaration (likely
+  // "<T extends BaseRecord>") appears stripped by the mail archiver --
+  // confirm against the repository source.
+  private  T generateFakeRecord(Class recordClass)
+  throws IllegalArgumentException, IllegalAccessException, IOException {
+
+// TODO add record
+return null;
+  }
+
+  /**
+   * Validate if a record is the same.
+   *
+   * @param original
+   * @param committed
+   * @param assertEquals Assert if the records are equal or just return.
+   * @return
+   * @throws IllegalArgumentException
+   * @throws IllegalAccessException
+   */
+  private boolean validateRecord(
+  BaseRecord original, BaseRecord committed, boolean assertEquals)
+  throws IllegalArgumentException, IllegalAccessException {
+
+boolean ret = true;
+
+Map fields = getFields(original);
+for (String key : fields.keySet()) {
+  if (key.equals("dateModified") ||
+  key.equals("dateCreated") ||
+  key.equals("proto")) {
+// Fields are updated/set on commit and fetch and may not match
+// the fields that are initialized in a non-committed object.
+continue;
+  }
+  Object data1 = getField(original, key);
+  Object data2 = getField(committed, key);
+  if (assertEquals) {
+assertEquals("Field " + key + " does not match", data1, data2);
+  } else if (!data1.equals(data2)) {
+ret = false;
+  }
+}
+
+long now = 

[18/24] hadoop git commit: HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri.

2017-08-16 Thread inigoiri
HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58b60889
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58b60889
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58b60889

Branch: refs/heads/HDFS-10467
Commit: 58b60889b07ecf9167110ab12da152d84cf36062
Parents: 8854913
Author: Inigo Goiri 
Authored: Fri Jul 28 15:55:10 2017 -0700
Committer: Inigo Goiri 
Committed: Wed Aug 16 17:11:34 2017 -0700

--
 .../federation/router/RouterRpcServer.java  | 59 +---
 1 file changed, 51 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58b60889/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 4bae71e..eaaab39 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -64,8 +64,9 @@ import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -75,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -85,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1736,13 +1739,6 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   }
 
   @Override // ClientProtocol
-  public AddingECPolicyResponse[] addErasureCodingPolicies(
-  ErasureCodingPolicy[] policies) throws IOException {
-checkOperation(OperationCategory.WRITE, false);
-return null;
-  }
-
-  @Override // ClientProtocol
   public void unsetErasureCodingPolicy(String src) throws IOException {
 checkOperation(OperationCategory.WRITE, false);
   }
@@ -1808,6 +1804,53 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
 return null;
   }
 
+  @Override
+  public AddECPolicyResponse[] addErasureCodingPolicies(
+  ErasureCodingPolicy[] arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+return null;
+  }
+
+  @Override
+  public void removeErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public void disableErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public void enableErasureCodingPolicy(String arg0) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override
+  public HashMap getErasureCodingCodecs() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override
+  public BlocksStats getBlocksStats() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return null;
+  }
+
+  @Override
+  public 

[12/24] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

2017-08-16 Thread inigoiri
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b226827/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
new file mode 100644
index 000..ee6f57d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import 
org.apache.hadoop.hdfs.server.federation.resolver.NamenodePriorityComparator;
+import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
+import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.util.Time;
+
+/**
+ * In-memory cache/mock of a namenode and file resolver. Stores the most
+ * recently updated NN information for each nameservice and block pool. Also
+ * stores a virtual mount table for resolving global namespace paths to local 
NN
+ * paths.
+ */
+public class MockResolver
+implements ActiveNamenodeResolver, FileSubclusterResolver {
+
+  private Map resolver =
+  new HashMap();
+  private Map locations =
+  new HashMap();
+  private Set namespaces =
+  new HashSet();
+  private String defaultNamespace = null;
+
+  // Both parameters are accepted only to match the resolver construction
+  // contract; this mock ignores them and just resets its in-memory maps.
+  public MockResolver(Configuration conf, StateStoreService store) {
+this.cleanRegistrations();
+  }
+
+  // Adds a mount-point -> (nameservice, path) mapping to the mock mount
+  // table, creating the list on first use and skipping duplicates. The
+  // first nameservice ever added becomes the default namespace.
+  public void addLocation(String mount, String nameservice, String location) {
+RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
+List locationsList = locations.get(mount);
+if (locationsList == null) {
+  locationsList = new LinkedList();
+  locations.put(mount, locationsList);
+}
+if (!locationsList.contains(remoteLocation)) {
+  locationsList.add(remoteLocation);
+}
+
+// First registered nameservice acts as the default namespace.
+if (this.defaultNamespace == null) {
+  this.defaultNamespace = nameservice;
+}
+  }
+
+  // Drops all namenode registrations and known namespaces by replacing the
+  // backing collections. Note the mount-table 'locations' map is NOT
+  // cleared here.
+  public synchronized void cleanRegistrations() {
+this.resolver =
+new HashMap();
+this.namespaces = new HashSet();
+  }
+
+  // Marks the namenode at 'successfulAddress' as ACTIVE within the given
+  // nameservice, then re-sorts that nameservice's namenode list so the
+  // active entry is prioritized (NamenodePriorityComparator order).
+  @Override
+  public void updateActiveNamenode(
+  String ns, InetSocketAddress successfulAddress) {
+
+// Match registrations by "host:port" RPC address.
+String address = successfulAddress.getHostName() + ":" +
+successfulAddress.getPort();
+String key = ns;
+if (key != null) {
+  // Update the active entry
+  @SuppressWarnings("unchecked")
+  List iterator =
+  (List) resolver.get(key);
+  for (FederationNamenodeContext namenode : iterator) {
+if (namenode.getRpcAddress().equals(address)) {
+  MockNamenodeContext nn = (MockNamenodeContext) namenode;
+  nn.setState(FederationNamenodeServiceState.ACTIVE);
+  break;
+}
+  }
+  // Re-sort so callers see namenodes in priority (active-first) order.
+  Collections.sort(iterator, new NamenodePriorityComparator());
+}
+  }
+
+  // Returns the registered namenode contexts for a nameservice (null if the
+  // nameservice is unknown to this mock).
+  @Override
+  public List
+  getNamenodesForNameserviceId(String nameserviceId) {
+return resolver.get(nameserviceId);
+  }

hadoop git commit: MAPREDUCE-6936. Remove unnecessary dependency of hadoop-yarn-server-common from hadoop-mapreduce-client-common (haibochen via rkanter)

2017-08-16 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 03d15fa58 -> 9de332d50


MAPREDUCE-6936. Remove unnecessary dependency of hadoop-yarn-server-common from 
hadoop-mapreduce-client-common (haibochen via rkanter)

(cherry picked from commit ab051bd42ee1d7c4d3b7cc71e6b2734a0955e767)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9de332d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9de332d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9de332d5

Branch: refs/heads/branch-2
Commit: 9de332d502d996833fbd6935fa3e5b793eccdc29
Parents: 03d15fa
Author: Robert Kanter 
Authored: Wed Aug 16 16:14:04 2017 -0700
Committer: Robert Kanter 
Committed: Wed Aug 16 16:15:48 2017 -0700

--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9de332d5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index 378fd14..73c0bf5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -47,10 +47,6 @@
   org.apache.hadoop
   hadoop-mapreduce-client-core
 
-
-  org.apache.hadoop
-  hadoop-yarn-server-common
-
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-6936. Remove unnecessary dependency of hadoop-yarn-server-common from hadoop-mapreduce-client-common (haibochen via rkanter)

2017-08-16 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0acc5e003 -> ab051bd42


MAPREDUCE-6936. Remove unnecessary dependency of hadoop-yarn-server-common from 
hadoop-mapreduce-client-common (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab051bd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab051bd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab051bd4

Branch: refs/heads/trunk
Commit: ab051bd42ee1d7c4d3b7cc71e6b2734a0955e767
Parents: 0acc5e0
Author: Robert Kanter 
Authored: Wed Aug 16 16:14:04 2017 -0700
Committer: Robert Kanter 
Committed: Wed Aug 16 16:14:04 2017 -0700

--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab051bd4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index db8ae49..b88b012 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -46,10 +46,6 @@
   org.apache.hadoop
   hadoop-mapreduce-client-core
 
-
-  org.apache.hadoop
-  hadoop-yarn-server-common
-
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-11430. Separate class InnerNode from class NetworkTopology and make it extendable. Contributed by Tsz Wo Nicholas Sze

2017-08-16 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 107e1722d -> 03d15fa58


HDFS-11430. Separate class InnerNode from class NetworkTopology and make it 
extendable. Contributed by Tsz Wo Nicholas Sze


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03d15fa5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03d15fa5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03d15fa5

Branch: refs/heads/branch-2
Commit: 03d15fa58e19309b09e39e048218f73759150b81
Parents: 107e172
Author: Mingliang Liu 
Authored: Tue Feb 21 15:29:20 2017 -0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Wed Aug 16 16:06:13 2017 -0700

--
 .../java/org/apache/hadoop/net/InnerNode.java   |  67 
 .../org/apache/hadoop/net/InnerNodeImpl.java| 304 +
 .../org/apache/hadoop/net/NetworkTopology.java  | 326 +--
 .../net/NetworkTopologyWithNodeGroup.java   |  43 +--
 .../apache/hadoop/net/TestNetworkTopology.java  |   2 +-
 5 files changed, 388 insertions(+), 354 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03d15fa5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java
new file mode 100644
index 000..d07929b
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNode.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.util.List;
+
+
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public interface InnerNode extends Node {
+  interface Factory {
+/** Construct an InnerNode from a path-like string */
+N newInnerNode(String path);
+  }
+
+  /** Add node n to the subtree of this node
+   * @param n node to be added
+   * @return true if the node is added; false otherwise
+   */
+  boolean add(Node n);
+
+  /** Given a node's string representation, return a reference to the node
+   * @param loc string location of the form /rack/node
+   * @return null if the node is not found or the childnode is there but
+   * not an instance of {@link InnerNodeImpl}
+   */
+  Node getLoc(String loc);
+
+  /** @return its children */
+  List getChildren();
+
+  /** @return the number of leave nodes. */
+  int getNumOfLeaves();
+
+  /** Remove node n from the subtree of this node
+   * @param n node to be deleted
+   * @return true if the node is deleted; false otherwise
+   */
+  boolean remove(Node n);
+
+  /** get leafIndex leaf of this subtree
+   * if it is not in the excludedNode
+   *
+   * @param leafIndex an indexed leaf of the node
+   * @param excludedNode an excluded node (can be null)
+   * @return
+   */
+  Node getLeaf(int leafIndex, Node excludedNode);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03d15fa5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
new file mode 100644
index 000..e6aa0f7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/InnerNodeImpl.java
@@ -0,0 +1,304 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this 

hadoop git commit: HDFS-12305. Ozone: SCM: Add StateMachine for pipeline/container. Contributed by Xiaoyu Yao.

2017-08-16 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 8151f26a1 -> 293c425b2


HDFS-12305. Ozone: SCM: Add StateMachine for pipeline/container. Contributed by 
Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/293c425b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/293c425b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/293c425b

Branch: refs/heads/HDFS-7240
Commit: 293c425b25b1c2ceff2eaa726e804ede767e371c
Parents: 8151f26
Author: Anu Engineer 
Authored: Wed Aug 16 14:36:34 2017 -0700
Committer: Anu Engineer 
Committed: Wed Aug 16 14:36:34 2017 -0700

--
 .../InvalidStateTransitionException.java| 42 +
 .../helpers/StateMachine/StateMachine.java  | 69 ++
 .../org/apache/hadoop/scm/TestStateMachine.java | 95 
 3 files changed, 206 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/293c425b/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/StateMachine/InvalidStateTransitionException.java
--
diff --git 
a/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/StateMachine/InvalidStateTransitionException.java
 
b/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/StateMachine/InvalidStateTransitionException.java
new file mode 100644
index 000..1fab16b
--- /dev/null
+++ 
b/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/StateMachine/InvalidStateTransitionException.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.scm.container.common.helpers.StateMachine;
+
+/**
+ * Class wraps invalid state transition exception.
+ */
+public class InvalidStateTransitionException extends Exception {
+  private Enum currentState;
+  private Enum event;
+
+  public InvalidStateTransitionException(Enum currentState, Enum event) {
+super("Invalid event: " + event + " at " + currentState + " state.");
+this.currentState = currentState;
+this.event = event;
+  }
+
+  public Enum getCurrentState() {
+return currentState;
+  }
+
+  public Enum getEvent() {
+return event;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/293c425b/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/StateMachine/StateMachine.java
--
diff --git 
a/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/StateMachine/StateMachine.java
 
b/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/StateMachine/StateMachine.java
new file mode 100644
index 000..1d5436f
--- /dev/null
+++ 
b/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/StateMachine/StateMachine.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.scm.container.common.helpers.StateMachine;
+
+import com.google.common.base.Supplier;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import 

hadoop git commit: MAPREDUCE-6940. Copy-paste error in the TaskAttemptUnsuccessfulCompletionEvent constructor. Contributed by Oleg Danilov

2017-08-16 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cf30380d4 -> 107e1722d


MAPREDUCE-6940. Copy-paste error in the TaskAttemptUnsuccessfulCompletionEvent 
constructor. Contributed by Oleg Danilov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/107e1722
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/107e1722
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/107e1722

Branch: refs/heads/branch-2
Commit: 107e1722d40447be64ca462f112de099895880b5
Parents: cf30380
Author: Jason Lowe 
Authored: Wed Aug 16 16:34:06 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 16:35:41 2017 -0500

--
 .../TaskAttemptUnsuccessfulCompletionEvent.java | 28 ++--
 1 file changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/107e1722/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
index 77ee2a0..9a11e57 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
@@ -56,7 +56,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
   int[] physMemKbytes;
   private static final Counters EMPTY_COUNTERS = new Counters();
 
-  /** 
+  /**
* Create an event to record the unsuccessful completion of attempts
* @param id Attempt ID
* @param taskType Type of the task
@@ -70,7 +70,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
* @param allSplits the "splits", or a pixelated graph of various
*measurable worker node state variables against progress.
*Currently there are four; wallclock time, CPU time,
-   *virtual memory and physical memory.  
+   *virtual memory and physical memory.
*/
   public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
@@ -97,7 +97,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
 ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
   }
 
-  /** 
+  /**
* @deprecated please use the constructor with an additional
*  argument, an array of splits arrays instead.  See
*  {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
@@ -113,19 +113,19 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
*/
   public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
-String status, long finishTime, 
+String status, long finishTime,
 String hostname, String error) {
 this(id, taskType, status, finishTime, hostname, -1, "",
 error, EMPTY_COUNTERS, null);
   }
-  
+
   public TaskAttemptUnsuccessfulCompletionEvent
   (TaskAttemptID id, TaskType taskType,
String status, long finishTime,
String hostname, int port, String rackName,
String error, int[][] allSplits) {
 this(id, taskType, status, finishTime, hostname, port,
-rackName, error, EMPTY_COUNTERS, null);
+rackName, error, EMPTY_COUNTERS, allSplits);
   }
 
   TaskAttemptUnsuccessfulCompletionEvent() {}
@@ -158,9 +158,9 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
 }
 return datum;
   }
-  
-  
-  
+
+
+
   public void setDatum(Object odatum) {
 this.datum =
 (TaskAttemptUnsuccessfulCompletion)odatum;
@@ -204,12 +204,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
   public String getHostname() { return hostname; }
   /** Get the rpc port for the host where the attempt executed */
   public int getPort() { return port; }
-  
+
   /** Get the rack name of the node where the attempt ran */
   public String getRackName() {
 return rackName == null ? null : rackName.toString();
   }
-  
+
   /** Get the error string */
   public String getError() { return error.toString(); }
   /** Get the task status */
@@ 

hadoop git commit: MAPREDUCE-6940. Copy-paste error in the TaskAttemptUnsuccessfulCompletionEvent constructor. Contributed by Oleg Danilov

2017-08-16 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk de462da04 -> 0acc5e003


MAPREDUCE-6940. Copy-paste error in the TaskAttemptUnsuccessfulCompletionEvent 
constructor. Contributed by Oleg Danilov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0acc5e00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0acc5e00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0acc5e00

Branch: refs/heads/trunk
Commit: 0acc5e00362602f027524637a86ca1bf80982986
Parents: de462da
Author: Jason Lowe 
Authored: Wed Aug 16 16:34:06 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 16:34:06 2017 -0500

--
 .../TaskAttemptUnsuccessfulCompletionEvent.java | 28 ++--
 1 file changed, 14 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0acc5e00/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
index 1732d91..1752967 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
@@ -60,7 +60,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
   int[] physMemKbytes;
   private static final Counters EMPTY_COUNTERS = new Counters();
 
-  /** 
+  /**
* Create an event to record the unsuccessful completion of attempts
* @param id Attempt ID
* @param taskType Type of the task
@@ -74,7 +74,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
* @param allSplits the "splits", or a pixelated graph of various
*measurable worker node state variables against progress.
*Currently there are four; wallclock time, CPU time,
-   *virtual memory and physical memory.  
+   *virtual memory and physical memory.
*/
   public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
@@ -101,7 +101,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
 ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
   }
 
-  /** 
+  /**
* @deprecated please use the constructor with an additional
*  argument, an array of splits arrays instead.  See
*  {@link org.apache.hadoop.mapred.ProgressSplitsBlock}
@@ -117,19 +117,19 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
*/
   public TaskAttemptUnsuccessfulCompletionEvent
(TaskAttemptID id, TaskType taskType,
-String status, long finishTime, 
+String status, long finishTime,
 String hostname, String error) {
 this(id, taskType, status, finishTime, hostname, -1, "",
 error, EMPTY_COUNTERS, null);
   }
-  
+
   public TaskAttemptUnsuccessfulCompletionEvent
   (TaskAttemptID id, TaskType taskType,
String status, long finishTime,
String hostname, int port, String rackName,
String error, int[][] allSplits) {
 this(id, taskType, status, finishTime, hostname, port,
-rackName, error, EMPTY_COUNTERS, null);
+rackName, error, EMPTY_COUNTERS, allSplits);
   }
 
   TaskAttemptUnsuccessfulCompletionEvent() {}
@@ -162,9 +162,9 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
 }
 return datum;
   }
-  
-  
-  
+
+
+
   public void setDatum(Object odatum) {
 this.datum =
 (TaskAttemptUnsuccessfulCompletion)odatum;
@@ -208,12 +208,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent 
implements HistoryEvent {
   public String getHostname() { return hostname; }
   /** Get the rpc port for the host where the attempt executed */
   public int getPort() { return port; }
-  
+
   /** Get the rack name of the node where the attempt ran */
   public String getRackName() {
 return rackName == null ? null : rackName.toString();
   }
-  
+
   /** Get the error string */
   public String getError() { return error.toString(); }
   /** Get the task status */
@@ -224,12 

hadoop git commit: HDFS-12238. Ozone: Add valid trace ID check in sendCommandAsync. Contributed by Ajay Kumar.

2017-08-16 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 63edc5b1e -> 8151f26a1


HDFS-12238. Ozone: Add valid trace ID check in sendCommandAsync. Contributed by 
Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8151f26a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8151f26a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8151f26a

Branch: refs/heads/HDFS-7240
Commit: 8151f26a1f8f7bb9ebd7688c698be9940ed6198b
Parents: 63edc5b
Author: Anu Engineer 
Authored: Wed Aug 16 14:24:58 2017 -0700
Committer: Anu Engineer 
Committed: Wed Aug 16 14:24:58 2017 -0700

--
 .../apache/hadoop/scm/XceiverClientHandler.java |  7 +++
 .../ozone/container/ContainerTestHelper.java| 36 ++-
 .../container/ozoneimpl/TestOzoneContainer.java | 65 +++-
 3 files changed, 103 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8151f26a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
index 99fec16..93d4438 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.SimpleChannelInboundHandler;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 .ContainerCommandResponseProto;
@@ -124,6 +125,12 @@ public class XceiverClientHandler extends
*/
   public CompletableFuture
 sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request) {
+
+// Throw an exception if request doesn't have traceId
+if(StringUtils.isEmpty(request.getTraceID())) {
+  throw new IllegalArgumentException("Invalid trace ID");
+}
+
 CompletableFuture response =
 new CompletableFuture<>();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8151f26a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 637755b..602a276 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -206,6 +206,8 @@ public final class ContainerTestHelper {
 ContainerCommandRequestProto.newBuilder();
 request.setCmdType(ContainerProtos.Type.WriteChunk);
 request.setWriteChunk(writeRequest);
+request.setTraceID(UUID.randomUUID().toString());
+
 return request.build();
   }
 
@@ -266,6 +268,7 @@ public final class ContainerTestHelper {
 ContainerCommandRequestProto.newBuilder();
 request.setCmdType(ContainerProtos.Type.GetSmallFile);
 request.setGetSmallFile(smallFileRequest);
+request.setTraceID(UUID.randomUUID().toString());
 return request.build();
   }
 
@@ -295,6 +298,7 @@ public final class ContainerTestHelper {
 ContainerCommandRequestProto.newBuilder();
 newRequest.setCmdType(ContainerProtos.Type.ReadChunk);
 newRequest.setReadChunk(readRequest);
+newRequest.setTraceID(UUID.randomUUID().toString());
 return newRequest.build();
   }
 
@@ -325,6 +329,7 @@ public final class ContainerTestHelper {
 ContainerCommandRequestProto.newBuilder();
 request.setCmdType(ContainerProtos.Type.DeleteChunk);
 request.setDeleteChunk(deleteRequest);
+request.setTraceID(UUID.randomUUID().toString());
 return request.build();
   }
 
@@ -353,6 +358,8 @@ public final class ContainerTestHelper {
 ContainerCommandRequestProto.newBuilder();
 request.setCmdType(ContainerProtos.Type.CreateContainer);
 request.setCreateContainer(createRequest);
+request.setTraceID(UUID.randomUUID().toString());
+
 return request.build();
   }
 
@@ -391,6 +398,7 @@ public final class ContainerTestHelper {
  

hadoop git commit: YARN-6900. ZooKeeper based implementation of the FederationStateStore. (Íñigo Goiri via Subru).

2017-08-16 Thread subru
Repository: hadoop
Updated Branches:
  refs/heads/trunk 14553061b -> de462da04


YARN-6900. ZooKeeper based implementation of the FederationStateStore. (Íñigo 
Goiri via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de462da0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de462da0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de462da0

Branch: refs/heads/trunk
Commit: de462da04e167a04b89ecf0f40d464cf39dc6549
Parents: 1455306
Author: Subru Krishnan 
Authored: Wed Aug 16 11:43:24 2017 -0700
Committer: Subru Krishnan 
Committed: Wed Aug 16 11:43:24 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   8 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../hadoop-yarn-server-common/pom.xml   |   5 +
 .../impl/ZookeeperFederationStateStore.java | 634 +++
 .../impl/TestZookeeperFederationStateStore.java |  89 +++
 .../TestFederationStateStoreFacadeRetry.java|  20 +-
 .../src/site/markdown/Federation.md |  56 +-
 7 files changed, 785 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acaef8..8515e0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2629,6 +2629,14 @@ public class YarnConfiguration extends Configuration {
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = "";
 
+  public static final String FEDERATION_STATESTORE_ZK_PREFIX =
+  FEDERATION_PREFIX + "zk-state-store.";
+  /** Parent znode path under which ZKRMStateStore will create znodes. */
+  public static final String FEDERATION_STATESTORE_ZK_PARENT_PATH =
+  FEDERATION_STATESTORE_ZK_PREFIX + "parent-path";
+  public static final String DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH =
+  "/federationstore";
+
   private static final String FEDERATION_STATESTORE_SQL_PREFIX =
   FEDERATION_PREFIX + "state-store.sql.";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 91a8b0a..c40c2c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -96,6 +96,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS);
 
+// Federation StateStore ZK implementation configs to be ignored
+configurationPropsToSkipCompare.add(
+YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH);
+
 // Federation StateStore SQL implementation configs to be ignored
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de462da0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 441a574..e8d3880 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -130,6 +130,11 @@
 
   
 
+
+  org.apache.curator
+  curator-test
+  test
+
   
 
   


[32/50] [abbrv] hadoop git commit: YARN-4830. Add support for resource types in the nodemanager. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-4830. Add support for resource types in the nodemanager. Contributed by 
Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3235c00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3235c00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3235c00

Branch: refs/heads/YARN-3926
Commit: b3235c00f1816293af19de7ec41bc4ba61330b02
Parents: cba1702
Author: Varun Vasudev 
Authored: Sat Jun 11 14:33:46 2016 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:13:02 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   |   3 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  19 ++-
 .../FileSystemBasedConfigurationProvider.java   |   3 +-
 .../hadoop/yarn/LocalConfigurationProvider.java |   3 +-
 .../api/records/impl/pb/ResourcePBImpl.java |  53 +++---
 .../yarn/util/resource/ResourceUtils.java   | 168 +++
 .../yarn/util/resource/TestResourceUtils.java   |  29 +++-
 .../resource-types/node-resources-1.xml |  29 
 .../resource-types/node-resources-2.xml |  39 +
 .../nodemanager/NodeStatusUpdaterImpl.java  |   7 +-
 .../util/NodeManagerHardwareUtils.java  |  52 ++
 .../resourcemanager/ResourceTrackerService.java |   9 +-
 12 files changed, 342 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3235c00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index ee8ef03..c9c6a7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -308,7 +308,8 @@ public abstract class Resource implements 
Comparable<Resource> {
 continue;
   }
   if (entry.getKey().equals(ResourceInformation.VCORES.getName())
-  && entry.getValue().getUnits().equals("")) {
+  && entry.getValue().getUnits()
+  .equals(ResourceInformation.VCORES.getUnits())) {
 continue;
   }
   sb.append(", ").append(entry.getKey()).append(": ")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3235c00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 856bb84..e809b7d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -65,6 +65,10 @@ public class YarnConfiguration extends Configuration {
   "resource-types.xml";
 
   @Private
+  public static final String NODE_RESOURCES_CONFIGURATION_FILE =
+  "node-resources.xml";
+
+  @Private
   public static final List<String> RM_CONFIGURATION_FILES =
   Collections.unmodifiableList(Arrays.asList(
   RESOURCE_TYPES_CONFIGURATION_FILE,
@@ -74,6 +78,16 @@ public class YarnConfiguration extends Configuration {
   YARN_SITE_CONFIGURATION_FILE,
   CORE_SITE_CONFIGURATION_FILE));
 
+  @Private
+  public static final List<String> NM_CONFIGURATION_FILES =
+  Collections.unmodifiableList(Arrays.asList(
+  NODE_RESOURCES_CONFIGURATION_FILE,
+  DR_CONFIGURATION_FILE,
+  CS_CONFIGURATION_FILE,
+  HADOOP_POLICY_CONFIGURATION_FILE,
+  YARN_SITE_CONFIGURATION_FILE,
+  CORE_SITE_CONFIGURATION_FILE));
+
   @Evolving
   public static final int APPLICATION_MAX_TAGS = 10;
 
@@ -112,12 +126,15 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_PREFIX = "yarn.";
 
   /
-  // Scheduler resource types configs
+  // Resource types configs
   
 
   public static final String RESOURCE_TYPES =
   YarnConfiguration.YARN_PREFIX + "resource-types";
 
+  public static final String NM_RESOURCES_PREFIX =
+  YarnConfiguration.NM_PREFIX + "resource-type.";
+
   /** Delay before deleting resource to ease debugging of NM issues */
   

[21/50] [abbrv] hadoop git commit: YARN-5146. Support for Fair Scheduler in new YARN UI. Contributed by Abdullah Yousufi.

2017-08-16 Thread sunilg
YARN-5146. Support for Fair Scheduler in new YARN UI. Contributed by Abdullah 
Yousufi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dadb0c22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dadb0c22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dadb0c22

Branch: refs/heads/YARN-3926
Commit: dadb0c2225adef5cb0126610733c285b51f4f43e
Parents: e3ae3e2
Author: Sunil G 
Authored: Tue Aug 15 21:58:44 2017 +0530
Committer: Sunil G 
Committed: Tue Aug 15 21:58:44 2017 +0530

--
 .../src/main/webapp/app/adapters/yarn-queue.js  |  30 -
 .../app/adapters/yarn-queue/capacity-queue.js   |  23 
 .../app/adapters/yarn-queue/fair-queue.js   |  23 
 .../app/adapters/yarn-queue/fifo-queue.js   |  23 
 .../app/adapters/yarn-queue/yarn-queue.js   |  30 +
 .../main/webapp/app/components/tree-selector.js |  19 ++-
 .../src/main/webapp/app/models/yarn-queue.js|  94 --
 .../app/models/yarn-queue/capacity-queue.js |  95 ++
 .../webapp/app/models/yarn-queue/fair-queue.js  |  79 
 .../webapp/app/models/yarn-queue/fifo-queue.js  |  52 
 .../webapp/app/models/yarn-queue/yarn-queue.js  |  23 
 .../main/webapp/app/routes/cluster-overview.js  |   4 +-
 .../src/main/webapp/app/routes/yarn-queue.js|  26 ++--
 .../src/main/webapp/app/routes/yarn-queues.js   |  12 +-
 .../main/webapp/app/routes/yarn-queues/index.js |  25 
 .../app/routes/yarn-queues/queues-selector.js   |  25 
 .../main/webapp/app/serializers/yarn-queue.js   | 129 ---
 .../serializers/yarn-queue/capacity-queue.js| 128 ++
 .../app/serializers/yarn-queue/fair-queue.js|  92 +
 .../app/serializers/yarn-queue/fifo-queue.js|  59 +
 .../app/serializers/yarn-queue/yarn-queue.js|  47 +++
 .../components/queue-configuration-table.hbs|  54 
 .../templates/components/queue-navigator.hbs|   7 +-
 .../yarn-queue/capacity-queue-conf-table.hbs|  54 
 .../yarn-queue/capacity-queue-info.hbs  |  84 
 .../components/yarn-queue/capacity-queue.hbs|  63 +
 .../yarn-queue/fair-queue-conf-table.hbs|  52 
 .../components/yarn-queue/fair-queue-info.hbs   |  66 ++
 .../components/yarn-queue/fair-queue.hbs|  63 +
 .../yarn-queue/fifo-queue-conf-table.hbs|  56 
 .../components/yarn-queue/fifo-queue-info.hbs   |  47 +++
 .../components/yarn-queue/fifo-queue.hbs|  48 +++
 .../webapp/app/templates/yarn-queue/info.hbs|  73 +--
 .../main/webapp/app/templates/yarn-queues.hbs   |  54 +---
 .../src/main/webapp/app/utils/color-utils.js|   1 -
 35 files changed, 1266 insertions(+), 494 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadb0c22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
deleted file mode 100644
index f2017df..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import AbstractAdapter from './abstract';
-
-export default AbstractAdapter.extend({
-  address: "rmWebAddress",
-  restNameSpace: "cluster",
-  serverName: "RM",
-
-  pathForType(/*modelName*/) {
-return 'scheduler'; // move to some common place, return path by modelname.
-  }
-
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadb0c22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js
--
diff --git 

[24/50] [abbrv] hadoop git commit: HADOOP-14773. Extend ZKCuratorManager API for more reusability. (Íñigo Goiri via Subru).

2017-08-16 Thread sunilg
HADOOP-14773. Extend ZKCuratorManager API for more reusability. (Íñigo Goiri 
via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75dd866b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75dd866b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75dd866b

Branch: refs/heads/YARN-3926
Commit: 75dd866bfb8b63cb9f13179d4365b05c48e0907d
Parents: f34646d
Author: Subru Krishnan 
Authored: Tue Aug 15 16:53:59 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Aug 15 16:53:59 2017 -0700

--
 .../hadoop/util/curator/ZKCuratorManager.java   | 54 ++--
 .../util/curator/TestZKCuratorManager.java  |  2 +-
 .../recovery/ZKRMStateStore.java| 19 +--
 3 files changed, 52 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75dd866b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
index 3adf028..9a031af 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
@@ -33,9 +33,12 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Helper class that provides utility methods specific to ZK operations.
  */
@@ -179,7 +182,6 @@ public final class ZKCuratorManager {
   /**
* Get the data in a ZNode.
* @param path Path of the ZNode.
-   * @param stat Output statistics of the ZNode.
* @return The data in the ZNode.
* @throws Exception If it cannot contact Zookeeper.
*/
@@ -190,16 +192,38 @@ public final class ZKCuratorManager {
   /**
* Get the data in a ZNode.
* @param path Path of the ZNode.
-   * @param stat Output statistics of the ZNode.
+   * @param stat
+   * @return The data in the ZNode.
+   * @throws Exception If it cannot contact Zookeeper.
+   */
+  public byte[] getData(final String path, Stat stat) throws Exception {
+return curator.getData().storingStatIn(stat).forPath(path);
+  }
+
+  /**
+   * Get the data in a ZNode.
+   * @param path Path of the ZNode.
* @return The data in the ZNode.
* @throws Exception If it cannot contact Zookeeper.
*/
-  public String getSringData(final String path) throws Exception {
+  public String getStringData(final String path) throws Exception {
 byte[] bytes = getData(path);
 return new String(bytes, Charset.forName("UTF-8"));
   }
 
   /**
+   * Get the data in a ZNode.
+   * @param path Path of the ZNode.
+   * @param stat Output statistics of the ZNode.
+   * @return The data in the ZNode.
+   * @throws Exception If it cannot contact Zookeeper.
+   */
+  public String getStringData(final String path, Stat stat) throws Exception {
+byte[] bytes = getData(path, stat);
+return new String(bytes, Charset.forName("UTF-8"));
+  }
+
+  /**
* Set data into a ZNode.
* @param path Path of the ZNode.
* @param data Data to set.
@@ -272,14 +296,36 @@ public final class ZKCuratorManager {
   }
 
   /**
+   * Utility function to ensure that the configured base znode exists.
+   * This recursively creates the znode as well as all of its parents.
+   * @param path Path of the znode to create.
+   * @throws Exception If it cannot create the file.
+   */
+  public void createRootDirRecursively(String path) throws Exception {
+String[] pathParts = path.split("/");
+Preconditions.checkArgument(
+pathParts.length >= 1 && pathParts[0].isEmpty(),
+"Invalid path: %s", path);
+StringBuilder sb = new StringBuilder();
+
+for (int i = 1; i < pathParts.length; i++) {
+  sb.append("/").append(pathParts[i]);
+  create(sb.toString());
+}
+  }
+
+  /**
* Delete a ZNode.
* @param path Path of the ZNode.
+   * @return If the znode was deleted.
* @throws Exception If it cannot contact ZooKeeper.
*/
-  public void delete(final String path) throws Exception {
+  public boolean delete(final String path) throws Exception {
 if (exists(path)) {
   curator.delete().deletingChildrenIfNeeded().forPath(path);
+  return true;
 }
+return false;
   }
 
   /**


[26/50] [abbrv] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-08-16 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a180be4/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
new file mode 100644
index 000..63fbc9d
--- /dev/null
+++ b/hadoop-build-tools/src/main/resources/META-INF/NOTICE.txt
@@ -0,0 +1,283 @@
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
+
+The binary distribution of this product bundles binaries of
+org.iq80.leveldb:leveldb-api (https://github.com/dain/leveldb), which has the
+following notices:
+* Copyright 2011 Dain Sundstrom 
+* Copyright 2011 FuseSource Corp. http://fusesource.com
+
+The binary distribution of this product bundles binaries of
+org.fusesource.hawtjni:hawtjni-runtime (https://github.com/fusesource/hawtjni),
+which has the following notices:
+* This product includes software developed by FuseSource Corp.
+  http://fusesource.com
+* This product includes software developed at
+  Progress Software Corporation and/or its  subsidiaries or affiliates.
+* This product includes software developed by IBM Corporation and others.
+
+The binary distribution of this product bundles binaries of
+AWS Java SDK 1.10.6,
+which has the following notices:
+ * This software includes third party software subject to the following
+ copyrights: - XML parsing and utility functions from JetS3t - Copyright
+ 2006-2009 James Murty. - JSON parsing and utility functions from JSON.org -
+ Copyright 2002 JSON.org. - PKCS#1 PEM encoded private key parsing and utility
+ functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc.
+
+The binary distribution of this product bundles binaries of
+Gson 2.2.4,
+which has the following notices:
+
+The Netty Project
+=
+
+Please visit the Netty web site for more information:
+
+  * http://netty.io/
+
+Copyright 2014 The Netty Project
+
+The Netty Project licenses this file to you under the Apache License,
+version 2.0 (the "License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at:
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+Also, please refer to each LICENSE..txt file, which is located in
+the 'license' directory of the distribution file, for the license terms of the
+components that this product depends on.
+
+---
+This product contains the extensions to Java Collections Framework which has
+been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+  * LICENSE:
+* license/LICENSE.jsr166y.txt (Public Domain)
+  * HOMEPAGE:
+* http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+* 
http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+This product contains a modified version of Robert Harder's Public Domain
+Base64 Encoder and Decoder, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.base64.txt (Public Domain)
+  * HOMEPAGE:
+* http://iharder.sourceforge.net/current/java/base64/
+
+This product contains a modified portion of 'Webbit', an event based
+WebSocket and HTTP server, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.webbit.txt (BSD License)
+  * HOMEPAGE:
+* https://github.com/joewalnes/webbit
+
+This product contains a modified portion of 'SLF4J', a simple logging
+facade for Java, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.slf4j.txt (MIT License)
+  * HOMEPAGE:
+* http://www.slf4j.org/
+
+This product contains a modified portion of 'ArrayDeque', written by Josh
+Bloch of Google, Inc:
+
+  * LICENSE:
+* license/LICENSE.deque.txt (Public Domain)
+
+This product contains a modified portion of 'Apache Harmony', an open source
+Java SE, which can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.harmony.txt (Apache License 2.0)
+  * HOMEPAGE:
+* http://archive.apache.org/dist/harmony/
+
+This product contains a modified version of Roland Kuhn's ASL2
+AbstractNodeQueue, which is based on Dmitriy Vyukov's non-intrusive MPSC queue.
+It can be obtained at:
+
+  * LICENSE:
+* license/LICENSE.abstractnodequeue.txt (Public Domain)
+  * HOMEPAGE:
+* 
https://github.com/akka/akka/blob/wip-2.2.3-for-scala-2.11/akka-actor/src/main/java/akka/dispatch/AbstractNodeQueue.java
+
+This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+and decompression library 

[22/50] [abbrv] hadoop git commit: YARN-7014. Fix off-by-one error causing heap corruption (Jason Lowe via nroberts)

2017-08-16 Thread sunilg
YARN-7014. Fix off-by-one error causing heap corruption (Jason Lowe via 
nroberts)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2654590
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2654590
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2654590

Branch: refs/heads/YARN-3926
Commit: d265459024b8e5f5eccf421627f684ca8f162112
Parents: dadb0c2
Author: Nathan Roberts 
Authored: Tue Aug 15 15:52:48 2017 -0500
Committer: Nathan Roberts 
Committed: Tue Aug 15 15:52:48 2017 -0500

--
 .../src/main/native/container-executor/impl/utils/string-utils.c  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2654590/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
index 703d484..063df7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
@@ -44,8 +44,7 @@ int validate_container_id(const char* input) {
* container_e17_1410901177871_0001_01_05
* container_1410901177871_0001_01_05
*/
-  char* input_cpy = malloc(strlen(input));
-  strcpy(input_cpy, input);
+  char* input_cpy = strdup(input);
   char* p = strtok(input_cpy, "_");
   int idx = 0;
   while (p != NULL) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/50] [abbrv] hadoop git commit: YARN-6232. Update resource usage and preempted resource calculations to take into account all resource types. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd1244ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
index cd04264..47e517f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppBlock.java
@@ -36,6 +36,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.webapp.AppBlock;
+import org.apache.hadoop.yarn.util.StringHelper;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet.DIV;
@@ -98,15 +99,12 @@ public class RMAppBlock extends AppBlock{
   attemptResourcePreempted)
 .__("Number of Non-AM Containers Preempted from Current Attempt:",
   attemptNumNonAMContainerPreempted)
-.__("Aggregate Resource Allocation:",
-  String.format("%d MB-seconds, %d vcore-seconds",
-  appMetrics == null ? "N/A" : appMetrics.getMemorySeconds(),
-  appMetrics == null ? "N/A" : appMetrics.getVcoreSeconds()))
+.__("Aggregate Resource Allocation:", appMetrics == null ? "N/A" :
+StringHelper
+.getResourceSecondsString(appMetrics.getResourceSecondsMap()))
 .__("Aggregate Preempted Resource Allocation:",
-  String.format("%d MB-seconds, %d vcore-seconds",
-appMetrics == null ? "N/A" : 
appMetrics.getPreemptedMemorySeconds(),
-appMetrics == null ? "N/A" :
-appMetrics.getPreemptedVcoreSeconds()));
+appMetrics == null ? "N/A" : StringHelper.getResourceSecondsString(
+appMetrics.getPreemptedResourceSecondsMap()));
 
 pdiv.__();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd1244ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index f11939a..6036fb5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -100,6 +100,7 @@ public class AppInfo {
   protected long vcoreSeconds;
   protected float queueUsagePercentage;
   protected float clusterUsagePercentage;
   protected Map<String, Long> resourceSecondsMap;
 
   // preemption info fields
   protected long preemptedResourceMB;
@@ -108,6 +109,7 @@ public class AppInfo {
   protected int numAMContainerPreempted;
   private long preemptedMemorySeconds;
   private long preemptedVcoreSeconds;
   protected Map<String, Long> preemptedResourceSecondsMap;
 
   // list of resource requests
   @XmlElement(name = "resourceRequests")
@@ -238,8 +240,10 @@ public class AppInfo {
   appMetrics.getResourcePreempted().getVirtualCores();
   memorySeconds = appMetrics.getMemorySeconds();
   vcoreSeconds = appMetrics.getVcoreSeconds();
+  resourceSecondsMap = appMetrics.getResourceSecondsMap();
   preemptedMemorySeconds = appMetrics.getPreemptedMemorySeconds();
   preemptedVcoreSeconds = appMetrics.getPreemptedVcoreSeconds();
+  preemptedResourceSecondsMap = 
appMetrics.getPreemptedResourceSecondsMap();
   ApplicationSubmissionContext appSubmissionContext =
   app.getApplicationSubmissionContext();
   unmanagedApplication =
@@ -432,7 +436,7 @@ 

[39/50] [abbrv] hadoop git commit: YARN-5588. [YARN-3926] Add support for resource profiles in distributed shell. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-5588. [YARN-3926] Add support for resource profiles in distributed shell. 
Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c280ad08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c280ad08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c280ad08

Branch: refs/heads/YARN-3926
Commit: c280ad08d0ae23d6e799e08d5904164d603d0a19
Parents: 85237a8
Author: Sunil G 
Authored: Mon Feb 27 21:44:14 2017 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:19:23 2017 +0530

--
 .../yarn/api/records/ProfileCapability.java |  16 +-
 .../ResourceProfilesNotEnabledException.java|  43 +
 .../distributedshell/ApplicationMaster.java |  61 +--
 .../applications/distributedshell/Client.java   | 174 +++
 .../distributedshell/TestDistributedShell.java  |  29 
 .../yarn/client/api/impl/TestAMRMClient.java|   2 +-
 .../server/resourcemanager/ClientRMService.java |   4 +-
 .../resource/ResourceProfilesManagerImpl.java   |   6 +-
 .../scheduler/ClusterNodeTracker.java   |  12 +-
 9 files changed, 288 insertions(+), 59 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c280ad08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
index faaddd5..1a8d1c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
@@ -150,17 +150,21 @@ public abstract class ProfileCapability {
 .checkArgument(capability != null, "Capability cannot be null");
 Preconditions.checkArgument(resourceProfilesMap != null,
 "Resource profiles map cannot be null");
+Resource none = Resource.newInstance(0, 0);
 Resource resource = Resource.newInstance(0, 0);
-
-if (resourceProfilesMap.containsKey(capability.getProfileName())) {
-  resource = Resource
-  .newInstance(resourceProfilesMap.get(capability.getProfileName()));
+String profileName = capability.getProfileName();
+if (profileName.isEmpty()) {
+  profileName = DEFAULT_PROFILE;
+}
+if (resourceProfilesMap.containsKey(profileName)) {
+  resource = Resource.newInstance(resourceProfilesMap.get(profileName));
 }
 
-if(capability.getProfileCapabilityOverride()!= null) {
+if (capability.getProfileCapabilityOverride() != null &&
+!capability.getProfileCapabilityOverride().equals(none)) {
   for (Map.Entry<String, ResourceInformation> entry : capability
   .getProfileCapabilityOverride().getResources().entrySet()) {
-if (entry.getValue() != null && entry.getValue().getValue() != 0) {
+if (entry.getValue() != null && entry.getValue().getValue() >= 0) {
   resource.setResourceInformation(entry.getKey(), entry.getValue());
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c280ad08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java
new file mode 100644
index 000..558e075
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ResourceProfilesNotEnabledException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT 

[34/50] [abbrv] hadoop git commit: YARN-4715. Add support to read resource types from a config file. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-4715. Add support to read resource types from a config file. Contributed 
by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97b8b54d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97b8b54d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97b8b54d

Branch: refs/heads/YARN-3926
Commit: 97b8b54d13b997d0bbefab7a73a6a09eccfc38b7
Parents: d5f4686
Author: Varun Vasudev 
Authored: Fri Mar 11 15:03:15 2016 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:13:02 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   |  24 +-
 .../yarn/api/records/ResourceInformation.java   |   8 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../exceptions/ResourceNotFoundException.java   |   2 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml  |   8 +
 .../api/records/impl/pb/ResourcePBImpl.java |  72 ++
 .../resource/DominantResourceCalculator.java|   5 +-
 .../yarn/util/resource/ResourceUtils.java   | 229 +
 .../hadoop/yarn/util/resource/Resources.java|  18 +-
 .../src/main/resources/yarn-default.xml |  10 +
 .../yarn/util/resource/TestResourceUtils.java   | 248 +++
 .../resource-types/resource-types-1.xml |  18 ++
 .../resource-types/resource-types-2.xml |  29 +++
 .../resource-types/resource-types-3.xml |  24 ++
 .../resource-types/resource-types-4.xml |  34 +++
 .../resource-types/resource-types-error-1.xml   |  29 +++
 .../resource-types/resource-types-error-2.xml   |  29 +++
 .../resource-types/resource-types-error-3.xml   |  29 +++
 .../resource-types/resource-types-error-4.xml   |  24 ++
 19 files changed, 762 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97b8b54d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 2371b13..ee8ef03 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -101,15 +101,6 @@ public abstract class Resource implements 
Comparable<Resource> {
 return new SimpleResource(memory, vCores);
   }
 
-  @Public
-  @Stable
-  public static Resource newInstance(
-  Map<String, ResourceInformation> resources) {
-Resource resource = Records.newRecord(Resource.class);
-resource.setResources(resources);
-return resource;
-  }
-
   /**
* This method is DEPRECATED:
* Use {@link Resource#getMemorySize()} instead
@@ -234,15 +225,6 @@ public abstract class Resource implements 
Comparable<Resource> {
   public abstract Long getResourceValue(String resource) throws YarnException;
 
   /**
-   * Set the resources to the map specified.
-   *
-   * @param resources Desired resources
-   */
-  @Public
-  @Evolving
-  public abstract void setResources(Map<String, ResourceInformation> 
resources);
-
-  /**
* Set the ResourceInformation object for a particular resource.
*
* @param resource the resource for which the ResourceInformation is provided
@@ -276,8 +258,8 @@ public abstract class Resource implements 
Comparable<Resource> {
 result = prime * result + getVirtualCores();
 for (Map.Entry<String, ResourceInformation> entry : getResources()
 .entrySet()) {
-  if (entry.getKey().equals(ResourceInformation.MEMORY.getName()) || entry
-  .getKey().equals(ResourceInformation.VCORES.getName())) {
+  if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
+  || entry.getKey().equals(ResourceInformation.VCORES.getName())) {
 continue;
   }
   result = prime * result + entry.getValue().hashCode();
@@ -320,7 +302,7 @@ public abstract class Resource implements 
Comparable {
 .append(getVirtualCores());
 for (Map.Entry entry : getResources()
 .entrySet()) {
-  if (entry.getKey().equals(ResourceInformation.MEMORY.getName())
+  if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
   && entry.getValue().getUnits()
   .equals(ResourceInformation.MEMORY_MB.getUnits())) {
 continue;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97b8b54d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java

[47/50] [abbrv] hadoop git commit: YARN-6892. [YARN-3926] Improve API implementation in Resources and DominantResourceCalculator class. Contributed by Sunil G.

2017-08-16 Thread sunilg
YARN-6892. [YARN-3926] Improve API implementation in Resources and 
DominantResourceCalculator class. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6606c3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6606c3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6606c3f

Branch: refs/heads/YARN-3926
Commit: e6606c3fb7778521e19c41c99bf587b55addf142
Parents: 667a842
Author: Sunil G 
Authored: Wed Aug 16 15:25:36 2017 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:35:23 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   |  70 +++-
 .../resource/DominantResourceCalculator.java| 317 ---
 .../hadoop/yarn/util/resource/Resources.java|  98 +++---
 3 files changed, 254 insertions(+), 231 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6606c3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 332296e..1e9f213 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -164,7 +164,6 @@ public abstract class Resource implements 
Comparable {
 "This method is implemented by ResourcePBImpl");
   }
 
-
   /**
* Get number of virtual cpu cores of the resource.
* 
@@ -179,7 +178,7 @@ public abstract class Resource implements 
Comparable {
   @Public
   @Evolving
   public abstract int getVirtualCores();
-  
+
   /**
* Set number of virtual cpu cores of the resource.
* 
@@ -225,6 +224,27 @@ public abstract class Resource implements 
Comparable {
   }
 
   /**
+   * Get ResourceInformation for a specified resource from a given index.
+   *
+   * @param index
+   *  of the resource
+   * @return the ResourceInformation object for the resource
+   * @throws ResourceNotFoundException
+   *   if the resource can't be found
+   */
+  @Public
+  @Evolving
+  public ResourceInformation getResourceInformation(int index)
+  throws ResourceNotFoundException {
+ResourceInformation[] resources = getResources();
+if (index < 0 || index >= resources.length) {
+  throw new ResourceNotFoundException("Unknown resource at index '" + index
+  + "'. Valid resources are: " + Arrays.toString(resources));
+}
+return resources[index];
+  }
+
+  /**
* Get the value for a specified resource. No information about the units is
* returned.
*
@@ -264,6 +284,29 @@ public abstract class Resource implements 
Comparable {
   }
 
   /**
+   * Set the ResourceInformation object for a particular resource.
+   *
+   * @param index
+   *  the resource index for which the ResourceInformation is provided
+   * @param resourceInformation
+   *  ResourceInformation object
+   * @throws ResourceNotFoundException
+   *   if the resource is not found
+   */
+  @Public
+  @Evolving
+  public void setResourceInformation(int index,
+  ResourceInformation resourceInformation)
+  throws ResourceNotFoundException {
+ResourceInformation[] resources = getResources();
+if (index < 0 || index >= resources.length) {
+  throw new ResourceNotFoundException("Unknown resource at index '" + index
+  + "'. Valid resources are " + Arrays.toString(resources));
+}
+ResourceInformation.copy(resourceInformation, resources[index]);
+  }
+
+  /**
* Set the value of a resource in the ResourceInformation object. The unit of
* the value is assumed to be the one in the ResourceInformation object.
*
@@ -288,6 +331,29 @@ public abstract class Resource implements 
Comparable {
 storedResourceInfo.setValue(value);
   }
 
+  /**
+   * Set the value of a resource in the ResourceInformation object. The unit of
+   * the value is assumed to be the one in the ResourceInformation object.
+   *
+   * @param index
+   *  the resource index for which the value is provided.
+   * @param value
+   *  the value to set
+   * @throws ResourceNotFoundException
+   *   if the resource is not found
+   */
+  @Public
+  @Evolving
+  public void setResourceValue(int index, long value)
+  throws ResourceNotFoundException {
+ResourceInformation[] resources = getResources();
+if (index < 0 || index >= resources.length) {
+  

[30/50] [abbrv] hadoop git commit: YARN-5586. Update the Resources class to consider all resource types. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-5586. Update the Resources class to consider all resource types. 
Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ce06212
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ce06212
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ce06212

Branch: refs/heads/YARN-3926
Commit: 8ce062122408ce05390e51f14c2676e847327dfa
Parents: 551b178
Author: Rohith Sharma K S 
Authored: Mon Sep 12 10:44:26 2016 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:13:02 2017 +0530

--
 .../api/records/impl/pb/ResourcePBImpl.java |   4 +-
 .../resource/DominantResourceCalculator.java|  36 ++--
 .../yarn/util/resource/ResourceUtils.java   |   3 +-
 .../hadoop/yarn/util/resource/Resources.java| 138 +++--
 .../yarn/util/resource/TestResourceUtils.java   |  23 +++
 .../yarn/util/resource/TestResources.java   | 207 +--
 .../resourcemanager/resource/TestResources.java |  43 
 7 files changed, 366 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ce06212/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index b51121b..63b466b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -170,7 +170,9 @@ public class ResourcePBImpl extends Resource {
   resourceInformation.setName(resource);
 }
 initResources();
-resources.put(resource, resourceInformation);
+if (resources.containsKey(resource)) {
+  resources.put(resource, resourceInformation);
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ce06212/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 3c4413c..7db1da4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -183,8 +183,10 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 Long requiredResourceValue = UnitsConversionUtil
 .convert(requiredResource.getUnits(), availableResource.getUnits(),
 requiredResource.getValue());
-Long tmp = availableResource.getValue() / requiredResourceValue;
-min = min < tmp ? min : tmp;
+if (requiredResourceValue != 0) {
+  Long tmp = availableResource.getValue() / requiredResourceValue;
+  min = min < tmp ? min : tmp;
+}
   } catch (YarnException ye) {
 throw new IllegalArgumentException(
 "Error getting resource information for " + resource, ye);
@@ -301,10 +303,11 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 .convert(stepFactorResourceInformation.getUnits(),
 rResourceInformation.getUnits(),
 stepFactorResourceInformation.getValue());
-
-tmp.setValue(
-Math.min(roundUp(Math.max(rValue, minimumValue), stepFactorValue),
-maximumValue));
+Long value = Math.max(rValue, minimumValue);
+if (stepFactorValue != 0) {
+  value = roundUp(value, stepFactorValue);
+}
+tmp.setValue(Math.min(value, maximumValue));
 ret.setResourceInformation(resource, tmp);
   } catch (YarnException ye) {
 throw new IllegalArgumentException(
@@ -340,9 +343,11 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
 .convert(stepFactorResourceInformation.getUnits(),
 rResourceInformation.getUnits(),
 

[27/50] [abbrv] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-08-16 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a180be4/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
--
diff --git a/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt 
b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
new file mode 100644
index 000..44880df
--- /dev/null
+++ b/hadoop-build-tools/src/main/resources/META-INF/LICENSE.txt
@@ -0,0 +1,1661 @@
+
+ Apache License
+   Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+  "License" shall mean the terms and conditions for use, reproduction,
+  and distribution as defined by Sections 1 through 9 of this document.
+
+  "Licensor" shall mean the copyright owner or entity authorized by
+  the copyright owner that is granting the License.
+
+  "Legal Entity" shall mean the union of the acting entity and all
+  other entities that control, are controlled by, or are under common
+  control with that entity. For the purposes of this definition,
+  "control" means (i) the power, direct or indirect, to cause the
+  direction or management of such entity, whether by contract or
+  otherwise, or (ii) ownership of fifty percent (50%) or more of the
+  outstanding shares, or (iii) beneficial ownership of such entity.
+
+  "You" (or "Your") shall mean an individual or Legal Entity
+  exercising permissions granted by this License.
+
+  "Source" form shall mean the preferred form for making modifications,
+  including but not limited to software source code, documentation
+  source, and configuration files.
+
+  "Object" form shall mean any form resulting from mechanical
+  transformation or translation of a Source form, including but
+  not limited to compiled object code, generated documentation,
+  and conversions to other media types.
+
+  "Work" shall mean the work of authorship, whether in Source or
+  Object form, made available under the License, as indicated by a
+  copyright notice that is included in or attached to the work
+  (an example is provided in the Appendix below).
+
+  "Derivative Works" shall mean any work, whether in Source or Object
+  form, that is based on (or derived from) the Work and for which the
+  editorial revisions, annotations, elaborations, or other modifications
+  represent, as a whole, an original work of authorship. For the purposes
+  of this License, Derivative Works shall not include works that remain
+  separable from, or merely link (or bind by name) to the interfaces of,
+  the Work and Derivative Works thereof.
+
+  "Contribution" shall mean any work of authorship, including
+  the original version of the Work and any modifications or additions
+  to that Work or Derivative Works thereof, that is intentionally
+  submitted to Licensor for inclusion in the Work by the copyright owner
+  or by an individual or Legal Entity authorized to submit on behalf of
+  the copyright owner. For the purposes of this definition, "submitted"
+  means any form of electronic, verbal, or written communication sent
+  to the Licensor or its representatives, including but not limited to
+  communication on electronic mailing lists, source code control systems,
+  and issue tracking systems that are managed by, or on behalf of, the
+  Licensor for the purpose of discussing and improving the Work, but
+  excluding communication that is conspicuously marked or otherwise
+  designated in writing by the copyright owner as "Not a Contribution."
+
+  "Contributor" shall mean Licensor and any individual or Legal Entity
+  on behalf of whom a Contribution has been received by Licensor and
+  subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  copyright license to reproduce, prepare Derivative Works of,
+  publicly display, publicly perform, sublicense, and distribute the
+  Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+  this License, each Contributor hereby grants to You a perpetual,
+  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+  (except as stated in this section) patent license to make, have made,
+  use, offer to sell, sell, import, and otherwise transfer the Work,
+  where such license applies only to those patent claims licensable
+  by such Contributor that are necessarily infringed by their
+  Contribution(s) alone or by 

[49/50] [abbrv] hadoop git commit: YARN-6908. ResourceProfilesManagerImpl is missing @Overrides on methods (Contributed by Sunil G. via Daniel Templeton)

2017-08-16 Thread sunilg
YARN-6908. ResourceProfilesManagerImpl is missing @Overrides on methods
(Contributed by Sunil G. via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f25bc507
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f25bc507
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f25bc507

Branch: refs/heads/YARN-3926
Commit: f25bc5077aadadafd3952e34cfa5cc078f312b66
Parents: e6606c3
Author: Daniel Templeton 
Authored: Wed Aug 16 09:41:52 2017 -0700
Committer: Sunil G 
Committed: Wed Aug 16 23:35:23 2017 +0530

--
 .../resource/ResourceProfilesManager.java   | 34 
 .../resource/ResourceProfilesManagerImpl.java   |  7 
 2 files changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f25bc507/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
index af54f05..c330e25 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManager.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.resource;
 
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.Resource;
 
@@ -28,19 +30,51 @@ import java.util.Map;
  * Interface for the resource profiles manager. Provides an interface to get
  * the list of available profiles and some helper functions.
  */
+@Public
+@Unstable
 public interface ResourceProfilesManager {
 
+  /**
+   * Method to handle all initialization steps for ResourceProfilesManager.
+   * @param config Configuration object
+   * @throws IOException when invalid resource profile names are loaded
+   */
   void init(Configuration config) throws IOException;
 
+  /**
+   * Get the resource capability associated with given profile name.
+   * @param profile name of resource profile
+   * @return resource capability for given profile
+   */
   Resource getProfile(String profile);
 
+  /**
+   * Get all supported resource profiles.
+   * @return a map of resource objects associated with each profile
+   */
  Map<String, Resource> getResourceProfiles();
 
+  /**
+   * Reload profiles based on updated configuration.
+   * @throws IOException when invalid resource profile names are loaded
+   */
   void reloadProfiles() throws IOException;
 
+  /**
+   * Get default supported resource profile.
+   * @return resource object which is default
+   */
   Resource getDefaultProfile();
 
+  /**
+   * Get minimum supported resource profile.
+   * @return resource object which is minimum
+   */
   Resource getMinimumProfile();
 
+  /**
+   * Get maximum supported resource profile.
+   * @return resource object which is maximum
+   */
   Resource getMaximumProfile();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f25bc507/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
index b5ab384..42d38b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
+++ 

[50/50] [abbrv] hadoop git commit: YARN-6994. [YARN-3926] Remove last uses of Long from resource types code. (Daniel Templeton via Yufei Gu)

2017-08-16 Thread sunilg
YARN-6994. [YARN-3926] Remove last uses of Long from resource types code. 
(Daniel Templeton via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/667a8424
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/667a8424
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/667a8424

Branch: refs/heads/YARN-3926
Commit: 667a8424b2139bfd01eec33e3a6bdffc6c8e5468
Parents: eb7c9b7
Author: Yufei Gu 
Authored: Mon Aug 14 11:18:08 2017 -0700
Committer: Sunil G 
Committed: Wed Aug 16 23:35:23 2017 +0530

--
 .../main/java/org/apache/hadoop/yarn/api/records/Resource.java   | 4 ++--
 .../apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java   | 4 ++--
 .../java/org/apache/hadoop/yarn/util/resource/Resources.java | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/667a8424/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index a485a57..332296e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -273,14 +273,14 @@ public abstract class Resource implements 
Comparable {
*/
   @Public
   @Evolving
-  public void setResourceValue(String resource, Long value)
+  public void setResourceValue(String resource, long value)
   throws ResourceNotFoundException {
 if (resource.equals(MEMORY)) {
   this.setMemorySize(value);
   return;
 }
 if (resource.equals(VCORES)) {
-  this.setVirtualCores(value.intValue());
+  this.setVirtualCores((int)value);
   return;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667a8424/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 561deb3..cbb040a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 import java.util.Arrays;
 import java.util.Map;
@@ -174,7 +174,7 @@ public class ResourcePBImpl extends BaseResource {
   }
 
   @Override
-  public void setResourceValue(String resource, Long value)
+  public void setResourceValue(String resource, long value)
   throws ResourceNotFoundException {
 maybeInitBuilder();
 if (resource == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/667a8424/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index f62114d..3cf78ed 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -104,7 +104,7 @@ public class Resources {
 }
 
 @Override
-public void setResourceValue(String resource, Long value)
+public void setResourceValue(String resource, long value)
 throws ResourceNotFoundException {
   throw 

[48/50] [abbrv] hadoop git commit: YARN-6935. [YARN-3926] ResourceProfilesManagerImpl.parseResource() has no need of the key parameter (Contributed by Manikandan R via Daniel Templeton)

2017-08-16 Thread sunilg
YARN-6935. [YARN-3926] ResourceProfilesManagerImpl.parseResource() has no need 
of the key parameter
(Contributed by Manikandan R via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb7c9b7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb7c9b7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb7c9b7b

Branch: refs/heads/YARN-3926
Commit: eb7c9b7bf01db652ec64c39e4ec868eaa5c59547
Parents: f84812c
Author: Daniel Templeton 
Authored: Fri Aug 11 16:32:13 2017 -0700
Committer: Sunil G 
Committed: Wed Aug 16 23:35:23 2017 +0530

--
 .../resource/ResourceProfilesManagerImpl.java   | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb7c9b7b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
index ab6..b5ab384 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceProfilesManagerImpl.java
@@ -87,22 +87,22 @@ public class ResourceProfilesManagerImpl implements 
ResourceProfilesManager {
 Iterator iterator = data.entrySet().iterator();
 while (iterator.hasNext()) {
   Map.Entry entry = (Map.Entry) iterator.next();
-  String key = entry.getKey().toString();
-  if (key.isEmpty()) {
+  String profileName = entry.getKey().toString();
+  if (profileName.isEmpty()) {
 throw new IOException(
 "Name of resource profile cannot be an empty string");
   }
   if (entry.getValue() instanceof Map) {
-Map value = (Map) entry.getValue();
+Map profileInfo = (Map) entry.getValue();
 // ensure memory and vcores are specified
-if (!value.containsKey(MEMORY) || !value.containsKey(VCORES)) {
+if (!profileInfo.containsKey(MEMORY) || 
!profileInfo.containsKey(VCORES)) {
   throw new IOException(
-  "Illegal resource profile definition; profile '" + key
+  "Illegal resource profile definition; profile '" + profileName
   + "' must contain '" + MEMORY + "' and '" + VCORES + "'");
 }
-Resource resource = parseResource(key, value);
-profiles.put(key, resource);
-LOG.info("Added profile '" + key + "' with resources " + resource);
+Resource resource = parseResource(profileInfo);
+profiles.put(profileName, resource);
+LOG.info("Added profile '" + profileName + "' with resources " + 
resource);
   }
 }
 // check to make sure mandatory profiles are present
@@ -116,9 +116,9 @@ public class ResourceProfilesManagerImpl implements 
ResourceProfilesManager {
 LOG.info("Loaded profiles " + profiles.keySet());
   }
 
-  private Resource parseResource(String key, Map value) throws IOException {
+  private Resource parseResource(Map profileInfo) throws IOException {
 Resource resource = Resource.newInstance(0, 0);
-Iterator iterator = value.entrySet().iterator();
+Iterator iterator = profileInfo.entrySet().iterator();
 Map resourceTypes = ResourceUtils
 .getResourceTypes();
 while (iterator.hasNext()) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[38/50] [abbrv] hadoop git commit: YARN-5587. Add support for resource profiles. (vvasudev via asuresh)

2017-08-16 Thread sunilg
YARN-5587. Add support for resource profiles. (vvasudev via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85237a86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85237a86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85237a86

Branch: refs/heads/YARN-3926
Commit: 85237a86e4ca5044f0a79dfb0cf052b5b626efc1
Parents: 5f3ac3f
Author: Arun Suresh 
Authored: Tue Nov 15 01:01:07 2016 -0800
Committer: Sunil G 
Committed: Wed Aug 16 23:19:22 2017 +0530

--
 .../dev-support/findbugs-exclude.xml|   4 +
 .../RegisterApplicationMasterResponse.java  |   8 +
 .../yarn/api/records/ProfileCapability.java |  94 ++-
 .../hadoop/yarn/api/records/Resource.java   |  14 ++
 .../yarn/api/records/ResourceInformation.java   |  57 ++-
 .../yarn/api/records/ResourceRequest.java   |  43 -
 .../hadoop-yarn/hadoop-yarn-client/pom.xml  |   1 +
 .../hadoop/yarn/client/api/AMRMClient.java  | 117 +-
 .../yarn/client/api/impl/AMRMClientImpl.java| 152 ++---
 .../client/api/impl/RemoteRequestsTable.java| 109 +
 .../yarn/client/api/impl/TestAMRMClient.java| 141 ++--
 .../impl/TestAMRMClientContainerRequest.java|   8 +-
 .../api/impl/TestDistributedScheduling.java |  12 +-
 .../yarn/client/api/impl/TestNMClient.java  |   5 +-
 .../TestOpportunisticContainerAllocation.java   |  31 ++--
 .../src/test/resources/resource-profiles.json   |  18 +++
 ...RegisterApplicationMasterResponsePBImpl.java |  58 +++
 .../api/records/impl/pb/ResourcePBImpl.java |   4 +-
 .../records/impl/pb/ResourceRequestPBImpl.java  |  41 -
 .../yarn/util/resource/ResourceUtils.java   | 161 ++-
 .../hadoop/yarn/util/resource/Resources.java|  10 +-
 .../ApplicationMasterService.java   |   1 +
 .../resourcemanager/DefaultAMSProcessor.java|   8 +
 .../server/resourcemanager/RMServerUtils.java   |  50 ++
 .../resource/ResourceProfilesManagerImpl.java   |   4 +
 .../scheduler/AbstractYarnScheduler.java|  44 +
 .../scheduler/ClusterNodeTracker.java   |   3 +-
 .../scheduler/SchedulerUtils.java   |  10 ++
 .../scheduler/capacity/CapacityScheduler.java   |   4 +-
 .../scheduler/fair/FairScheduler.java   |   4 +-
 .../scheduler/fifo/FifoScheduler.java   |  13 +-
 .../yarn/server/resourcemanager/MockRM.java |   2 +
 .../server/resourcemanager/TestAppManager.java  |   1 +
 .../TestApplicationMasterService.java   |  35 
 .../scheduler/fair/TestFairScheduler.java   |   4 +
 .../hadoop/yarn/server/MiniYARNCluster.java |   2 +
 36 files changed, 1100 insertions(+), 173 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85237a86/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6825a36..ce7a9c6 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -154,6 +154,10 @@
 
   
   
+
+
+  
+  
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85237a86/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
index 0b886dd..8fa8563 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
@@ -204,4 +204,12 @@ public abstract class RegisterApplicationMasterResponse {
   @Unstable
   public abstract void setSchedulerResourceTypes(
   EnumSet types);
+
+  @Public
+  @Unstable
+  public abstract Map getResourceProfiles();
+
+  @Private
+  @Unstable
+  public abstract void setResourceProfiles(Map profiles);
 }


[41/50] [abbrv] hadoop git commit: YARN-6445. [YARN-3926] Performance improvements in resource profile branch with respect to SLS. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-6445. [YARN-3926] Performance improvements in resource profile branch with 
respect to SLS. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4f51206
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4f51206
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4f51206

Branch: refs/heads/YARN-3926
Commit: c4f512060850b7550beb30e8d6eb0307fba4ee6b
Parents: dd1244a
Author: Sunil G 
Authored: Tue Apr 25 11:53:11 2017 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:27:42 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   | 19 +--
 .../yarn/api/records/ResourceInformation.java   | 51 ++---
 .../hadoop/yarn/util/UnitsConversionUtil.java   | 34 ++-
 .../yarn/conf/TestResourceInformation.java  |  4 +-
 .../yarn/util/TestUnitsConversionUtil.java  | 60 ++--
 .../api/records/impl/pb/ResourcePBImpl.java | 23 
 .../resource/DominantResourceCalculator.java| 54 --
 .../hadoop/yarn/util/resource/Resources.java| 18 +++---
 .../yarn/util/resource/TestResourceUtils.java   |  1 +
 .../yarn/util/resource/TestResources.java   | 12 +++-
 .../resource/ResourceProfilesManagerImpl.java   |  3 +-
 11 files changed, 157 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4f51206/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index c349a32..4356986 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -107,12 +107,23 @@ public abstract class Resource implements 
Comparable {
   @InterfaceStability.Unstable
   public static Resource newInstance(Resource resource) {
 Resource ret = Resource.newInstance(0, 0);
-for (Map.Entry entry : resource.getResources()
+Resource.copy(resource, ret);
+return ret;
+  }
+
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public static void copy(Resource source, Resource dest) {
+for (Map.Entry entry : source.getResources()
 .entrySet()) {
-  ret.setResourceInformation(entry.getKey(),
-  ResourceInformation.newInstance(entry.getValue()));
+  try {
+ResourceInformation.copy(entry.getValue(),
+dest.getResourceInformation(entry.getKey()));
+  } catch (YarnException ye) {
+dest.setResourceInformation(entry.getKey(),
+ResourceInformation.newInstance(entry.getValue()));
+  }
 }
-return ret;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4f51206/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 7d74efc..d75b441 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -30,9 +30,9 @@ public class ResourceInformation implements 
Comparable {
   private String name;
   private String units;
   private ResourceTypes resourceType;
-  private Long value;
-  private Long minimumAllocation;
-  private Long maximumAllocation;
+  private long value;
+  private long minimumAllocation;
+  private long maximumAllocation;
 
   private static final String MEMORY_URI = "memory-mb";
   private static final String VCORES_URI = "vcores";
@@ -106,7 +106,7 @@ public class ResourceInformation implements 
Comparable {
*
* @return the resource value
*/
-  public Long getValue() {
+  public long getValue() {
 return value;
   }
 
@@ -115,7 +115,7 @@ public class ResourceInformation implements 
Comparable {
*
* @param rValue the resource value
*/
-  public void setValue(Long rValue) {
+  public void setValue(long rValue) {
   

[37/50] [abbrv] hadoop git commit: YARN-5587. Add support for resource profiles. (vvasudev via asuresh)

2017-08-16 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/85237a86/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
index 1a70933..032bbc3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java
@@ -33,6 +33,8 @@ import 
org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProfilesProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProfileEntry;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
@@ -59,6 +61,7 @@ public class RegisterApplicationMasterResponsePBImpl extends
   private List containersFromPreviousAttempts = null;
   private List nmTokens = null;
   private EnumSet schedulerResourceTypes = null;
+  private Map profiles = null;
 
   public RegisterApplicationMasterResponsePBImpl() {
 builder = RegisterApplicationMasterResponseProto.newBuilder();
@@ -123,6 +126,9 @@ public class RegisterApplicationMasterResponsePBImpl extends
 if(schedulerResourceTypes != null) {
   addSchedulerResourceTypes();
 }
+if (profiles != null) {
+  addResourceProfiles();
+}
   }
 
 
@@ -433,6 +439,58 @@ public class RegisterApplicationMasterResponsePBImpl 
extends
 this.schedulerResourceTypes.addAll(types);
   }
 
+  private void addResourceProfiles() {
+maybeInitBuilder();
+builder.clearResourceProfiles();
+if (profiles == null) {
+  return;
+}
+ResourceProfilesProto.Builder profilesBuilder =
+ResourceProfilesProto.newBuilder();
+for (Map.Entry entry : profiles.entrySet()) {
+  ResourceProfileEntry.Builder entryBuilder =
+  ResourceProfileEntry.newBuilder();
+  entryBuilder.setName(entry.getKey());
+  entryBuilder.setResources(convertToProtoFormat(entry.getValue()));
+  profilesBuilder.addResourceProfilesMap(entryBuilder.build());
+}
+builder.setResourceProfiles(profilesBuilder.build());
+  }
+
+  private void initResourceProfiles() {
+if (this.profiles != null) {
+  return;
+}
+this.profiles = new HashMap<>();
+RegisterApplicationMasterResponseProtoOrBuilder p =
+viaProto ? proto : builder;
+
+if (p.hasResourceProfiles()) {
+  ResourceProfilesProto profilesProto = p.getResourceProfiles();
+  for (ResourceProfileEntry entry : profilesProto
+  .getResourceProfilesMapList()) {
+this.profiles
+.put(entry.getName(), 
convertFromProtoFormat(entry.getResources()));
+  }
+}
+  }
+
+  @Override
+  public Map getResourceProfiles() {
+initResourceProfiles();
+return this.profiles;
+  }
+
+  @Override
+  public void setResourceProfiles(Map profilesMap) {
+if (profilesMap == null) {
+  return;
+}
+initResourceProfiles();
+this.profiles.clear();
+this.profiles.putAll(profilesMap);
+  }
+
   private Resource convertFromProtoFormat(ResourceProto resource) {
 return new ResourcePBImpl(resource);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85237a86/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 63b466b..955ea52 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -144,8 +144,8 @@ public class 

[33/50] [abbrv] hadoop git commit: YARN-4172. Extend DominantResourceCalculator to account for all resources. (Varun Vasudev via wangda)

2017-08-16 Thread sunilg
YARN-4172. Extend DominantResourceCalculator to account for all resources. 
(Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5f46865
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5f46865
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5f46865

Branch: refs/heads/YARN-3926
Commit: d5f468657177fc4d665eddefb0b4936bdf76100e
Parents: 6a180be
Author: Wangda Tan 
Authored: Fri Jan 29 10:53:31 2016 +0800
Committer: Sunil G 
Committed: Wed Aug 16 23:13:02 2017 +0530

--
 .../resource/DominantResourceCalculator.java| 380 +--
 1 file changed, 273 insertions(+), 107 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f46865/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 7697e1d..a94e7a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -22,25 +22,31 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.UnitsConversionUtil;
+
+import java.util.HashSet;
+import java.util.Set;
 
 /**
- * A {@link ResourceCalculator} which uses the concept of  
+ * A {@link ResourceCalculator} which uses the concept of
  * dominant resource to compare multi-dimensional resources.
  *
- * Essentially the idea is that the in a multi-resource environment, 
- * the resource allocation should be determined by the dominant share 
- * of an entity (user or queue), which is the maximum share that the 
- * entity has been allocated of any resource. 
- * 
- * In a nutshell, it seeks to maximize the minimum dominant share across 
- * all entities. 
- * 
+ * Essentially the idea is that the in a multi-resource environment,
+ * the resource allocation should be determined by the dominant share
+ * of an entity (user or queue), which is the maximum share that the
+ * entity has been allocated of any resource.
+ *
+ * In a nutshell, it seeks to maximize the minimum dominant share across
+ * all entities.
+ *
  * For example, if user A runs CPU-heavy tasks and user B runs
- * memory-heavy tasks, it attempts to equalize CPU share of user A 
- * with Memory-share of user B. 
- * 
+ * memory-heavy tasks, it attempts to equalize CPU share of user A
+ * with Memory-share of user B.
+ *
  * In the single resource case, it reduces to max-min fairness for that 
resource.
- * 
+ *
  * See the Dominant Resource Fairness paper for more details:
  * www.cs.berkeley.edu/~matei/papers/2011/nsdi_drf.pdf
  */
@@ -50,6 +56,56 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   private static final Log LOG =
   LogFactory.getLog(DominantResourceCalculator.class);
 
+
+  private Set resourceNames;
+
+  public DominantResourceCalculator() {
+resourceNames = new HashSet<>();
+resourceNames.add(ResourceInformation.MEMORY.getName());
+resourceNames.add(ResourceInformation.VCORES.getName());
+  }
+
+  /**
+   * Compare two resources - if the value for every resource type for the lhs
+   * is greater than that of the rhs, return 1. If the value for every resource
+   * type in the lhs is less than the rhs, return -1. Otherwise, return 0
+   *
+   * @param lhs resource to be compared
+   * @param rhs resource to be compared
+   * @return 0, 1, or -1
+   */
+  private int compare(Resource lhs, Resource rhs) {
+boolean lhsGreater = false;
+boolean rhsGreater = false;
+int ret = 0;
+
+for (String rName : resourceNames) {
+  try {
+ResourceInformation lhsResourceInformation =
+lhs.getResourceInformation(rName);
+ResourceInformation rhsResourceInformation =
+rhs.getResourceInformation(rName);
+int diff = lhsResourceInformation.compareTo(rhsResourceInformation);
+if (diff >= 1) {
+  lhsGreater = true;
+} else 

[35/50] [abbrv] hadoop git commit: YARN-4829. Add support for binary units in Resource class.(vvasudev via asuresh)

2017-08-16 Thread sunilg
YARN-4829. Add support for binary units in Resource class.(vvasudev via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cba1702f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cba1702f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cba1702f

Branch: refs/heads/YARN-3926
Commit: cba1702fc8af882da3112df880c77c3f9ec1906d
Parents: 97b8b54
Author: Arun Suresh 
Authored: Thu Mar 17 23:50:22 2016 -0700
Committer: Sunil G 
Committed: Wed Aug 16 23:13:02 2017 +0530

--
 .../yarn/api/records/ResourceInformation.java   |  2 +-
 .../hadoop/yarn/util/UnitsConversionUtil.java   | 45 ++--
 .../yarn/util/TestUnitsConversionUtil.java  | 17 +++-
 .../api/records/impl/pb/ResourcePBImpl.java |  2 +-
 .../yarn/util/resource/TestResourceUtils.java   |  2 +-
 5 files changed, 52 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba1702f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 80e3192..a17e81b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -36,7 +36,7 @@ public class ResourceInformation implements 
Comparable {
   private static final String VCORES_URI = "vcores";
 
   public static final ResourceInformation MEMORY_MB =
-  ResourceInformation.newInstance(MEMORY_URI, "M");
+  ResourceInformation.newInstance(MEMORY_URI, "Mi");
   public static final ResourceInformation VCORES =
   ResourceInformation.newInstance(VCORES_URI);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cba1702f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
index 7785263..47bb3df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/UnitsConversionUtil.java
@@ -46,7 +46,8 @@ public class UnitsConversionUtil {
   }
 
   private static final String[] UNITS =
-  {"p", "n", "u", "m", "", "k", "M", "G", "T", "P"};
+  { "p", "n", "u", "m", "", "k", "M", "G", "T", "P", "Ki", "Mi", "Gi", 
"Ti",
+  "Pi" };
   private static final List SORTED_UNITS = Arrays.asList(UNITS);
   public static final Set KNOWN_UNITS = createKnownUnitsSet();
   private static final Converter PICO =
@@ -65,6 +66,15 @@ public class UnitsConversionUtil {
   private static final Converter PETA =
   new Converter(1000L * 1000L * 1000L * 1000L * 1000L, 1L);
 
+  private static final Converter KILO_BINARY = new Converter(1024L, 1L);
+  private static final Converter MEGA_BINARY = new Converter(1024L * 1024L, 
1L);
+  private static final Converter GIGA_BINARY =
+  new Converter(1024L * 1024L * 1024L, 1L);
+  private static final Converter TERA_BINARY =
+  new Converter(1024L * 1024L * 1024L * 1024L, 1L);
+  private static final Converter PETA_BINARY =
+  new Converter(1024L * 1024L * 1024L * 1024L * 1024L, 1L);
+
   private static Set createKnownUnitsSet() {
 Set ret = new HashSet<>();
 ret.addAll(Arrays.asList(UNITS));
@@ -93,6 +103,16 @@ public class UnitsConversionUtil {
   return TERA;
 case "P":
   return PETA;
+case "Ki":
+  return KILO_BINARY;
+case "Mi":
+  return MEGA_BINARY;
+case "Gi":
+  return GIGA_BINARY;
+case "Ti":
+  return TERA_BINARY;
+case "Pi":
+  return PETA_BINARY;
 default:
   throw new IllegalArgumentException(
   "Unknown unit '" + unit + "'. Known units are " + KNOWN_UNITS);
@@ -112,28 +132,29 @@ public class UnitsConversionUtil {
 if (toUnit == null || fromUnit == null || fromValue == null) {
   throw new IllegalArgumentException("One or more arguments are null");
 }
-Long tmp;
 String overflowMsg =
 

[45/50] [abbrv] hadoop git commit: YARN-6788. [YARN-3926] Improve performance of resource profile branch (Contributed by Sunil Govindan via Daniel Templeton)

2017-08-16 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f84812c5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
deleted file mode 100644
index 86cf872..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ /dev/null
@@ -1,488 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.util.resource;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.conf.ConfigurationProvider;
-import org.apache.hadoop.yarn.conf.ConfigurationProviderFactory;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Helper class to read the resource-types to be supported by the system.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public class ResourceUtils {
-
-  public static final String UNITS = ".units";
-  public static final String TYPE = ".type";
-  public static final String MINIMUM_ALLOCATION = ".minimum-allocation";
-  public static final String MAXIMUM_ALLOCATION = ".maximum-allocation";
-
-  private static final String MEMORY = ResourceInformation.MEMORY_MB.getName();
-  private static final String VCORES = ResourceInformation.VCORES.getName();
-
-  private static final Set DISALLOWED_NAMES = new HashSet<>();
-  static {
-DISALLOWED_NAMES.add("memory");
-DISALLOWED_NAMES.add(MEMORY);
-DISALLOWED_NAMES.add(VCORES);
-  }
-
-  private static volatile Object lock;
-  private static Map readOnlyResources;
-  private static volatile Object nodeLock;
-  private static Map readOnlyNodeResources;
-
-
-  static final Log LOG = LogFactory.getLog(ResourceUtils.class);
-
-  private ResourceUtils() {
-  }
-
-  private static void checkMandatatoryResources(
-  Map resourceInformationMap)
-  throws YarnRuntimeException {
-if (resourceInformationMap.containsKey(MEMORY)) {
-  ResourceInformation memInfo = resourceInformationMap.get(MEMORY);
-  String memUnits = ResourceInformation.MEMORY_MB.getUnits();
-  ResourceTypes memType = ResourceInformation.MEMORY_MB.getResourceType();
-  if (!memInfo.getUnits().equals(memUnits) || !memInfo.getResourceType()
-  .equals(memType)) {
-throw new YarnRuntimeException(
-"Attempt to re-define mandatory resource 'memory-mb'. It can only"
-+ " be of type 'COUNTABLE' and have units 'Mi'.");
-  }
-}
-
-if (resourceInformationMap.containsKey(VCORES)) {
-  ResourceInformation vcoreInfo = resourceInformationMap.get(VCORES);
-  String vcoreUnits = ResourceInformation.VCORES.getUnits();
-  ResourceTypes vcoreType = ResourceInformation.VCORES.getResourceType();
-  if (!vcoreInfo.getUnits().equals(vcoreUnits) || !vcoreInfo
-  .getResourceType().equals(vcoreType)) {
-throw new YarnRuntimeException(
-"Attempt to re-define mandatory 

[40/50] [abbrv] hadoop git commit: YARN-6761. Fix build for YARN-3926 branch. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-6761. Fix build for YARN-3926 branch. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62650877
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62650877
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62650877

Branch: refs/heads/YARN-3926
Commit: 6265087766eb2486a0e6d2bcdfbfc8448125a719
Parents: c4f5120
Author: Sunil G 
Authored: Mon Jul 10 09:21:26 2017 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:27:42 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   | 112 +++
 .../resource/DominantResourceCalculator.java|   1 +
 2 files changed, 90 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62650877/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 4356986..9a8e2ec 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -29,6 +29,8 @@ import 
org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 
 /**
@@ -58,12 +60,17 @@ import java.util.Map;
 @Stable
 public abstract class Resource implements Comparable {
 
+  private static Resource tmpResource = Records.newRecord(Resource.class);
+
   private static class SimpleResource extends Resource {
 private long memory;
 private long vcores;
+private Map resourceInformationMap;
+
 SimpleResource(long memory, long vcores) {
   this.memory = memory;
   this.vcores = vcores;
+
 }
 @Override
 public int getMemory() {
@@ -89,17 +96,44 @@ public abstract class Resource implements 
Comparable {
 public void setVirtualCores(int vcores) {
   this.vcores = vcores;
 }
+@Override
+public Map getResources() {
+  if (resourceInformationMap == null) {
+resourceInformationMap = new HashMap<>();
+resourceInformationMap.put(ResourceInformation.MEMORY_MB.getName(),
+ResourceInformation.newInstance(ResourceInformation.MEMORY_MB));
+resourceInformationMap.put(ResourceInformation.VCORES.getName(),
+ResourceInformation.newInstance(ResourceInformation.VCORES));
+  }
+  resourceInformationMap.get(ResourceInformation.MEMORY_MB.getName())
+  .setValue(this.memory);
+  resourceInformationMap.get(ResourceInformation.VCORES.getName())
+  .setValue(this.vcores);
+  return Collections.unmodifiableMap(resourceInformationMap);
+}
   }
 
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {
+if (tmpResource.getResources().size() > 2) {
+  Resource ret = Records.newRecord(Resource.class);
+  ret.setMemorySize(memory);
+  ret.setVirtualCores(vCores);
+  return ret;
+}
 return new SimpleResource(memory, vCores);
   }
 
   @Public
   @Stable
   public static Resource newInstance(long memory, int vCores) {
+if (tmpResource.getResources().size() > 2) {
+  Resource ret = Records.newRecord(Resource.class);
+  ret.setMemorySize(memory);
+  ret.setVirtualCores(vCores);
+  return ret;
+}
 return new SimpleResource(memory, vCores);
   }
 
@@ -116,13 +150,7 @@ public abstract class Resource implements 
Comparable {
   public static void copy(Resource source, Resource dest) {
 for (Map.Entry entry : source.getResources()
 .entrySet()) {
-  try {
-ResourceInformation.copy(entry.getValue(),
-dest.getResourceInformation(entry.getKey()));
-  } catch (YarnException ye) {
-dest.setResourceInformation(entry.getKey(),
-ResourceInformation.newInstance(entry.getValue()));
-  }
+  dest.setResourceInformation(entry.getKey(), entry.getValue());
 }
   }
 
@@ -234,8 +262,15 @@ public abstract class Resource implements 
Comparable {
*/
   @Public
   @Evolving
-  public abstract ResourceInformation getResourceInformation(String resource)
-  throws YarnException;
+  public ResourceInformation getResourceInformation(String 

[44/50] [abbrv] hadoop git commit: YARN-6232. Update resource usage and preempted resource calculations to take into account all resource types. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-6232. Update resource usage and preempted resource calculations to take 
into account all resource types. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd1244ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd1244ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd1244ab

Branch: refs/heads/YARN-3926
Commit: dd1244ab6e3f513f78336081a30195e813b6914b
Parents: c280ad0
Author: Sunil G 
Authored: Mon Mar 6 11:34:20 2017 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:27:42 2017 +0530

--
 .../records/ApplicationResourceUsageReport.java |  58 ++-
 .../src/main/proto/yarn_protos.proto|   7 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  35 +++--
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  16 +-
 .../ApplicationResourceUsageReportPBImpl.java   | 151 ---
 .../yarn/api/records/impl/pb/ProtoUtils.java|  34 +
 .../apache/hadoop/yarn/util/StringHelper.java   |  36 +
 .../hadoop/yarn/api/BasePBImplRecordsTest.java  |  12 ++
 .../hadoop/yarn/api/TestPBImplRecords.java  |   4 +
 ...pplicationHistoryManagerOnTimelineStore.java |  18 ++-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  16 +-
 .../server/resourcemanager/RMAppManager.java|   8 +-
 .../server/resourcemanager/RMServerUtils.java   |  10 +-
 .../resourcemanager/recovery/RMStateStore.java  |   7 +-
 .../records/ApplicationAttemptStateData.java|  89 +--
 .../pb/ApplicationAttemptStateDataPBImpl.java   |  50 ++
 .../server/resourcemanager/rmapp/RMAppImpl.java |  43 +++---
 .../resourcemanager/rmapp/RMAppMetrics.java |  41 +++--
 .../attempt/AggregateAppResourceUsage.java  |  34 ++---
 .../rmapp/attempt/RMAppAttemptImpl.java |  32 ++--
 .../rmapp/attempt/RMAppAttemptMetrics.java  | 107 ++---
 .../rmcontainer/RMContainerImpl.java|  16 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  47 +++---
 .../resourcemanager/webapp/RMAppBlock.java  |  14 +-
 .../resourcemanager/webapp/dao/AppInfo.java |  15 +-
 .../webapp/dao/ResourceInfo.java|  36 -
 .../webapp/dao/SchedulerInfo.java   |   2 +-
 .../yarn_server_resourcemanager_recovery.proto  |   2 +
 .../server/resourcemanager/TestAppManager.java  |   7 +-
 .../TestContainerResourceUsage.java |   7 +-
 .../applicationsmanager/MockAsm.java|   9 +-
 .../metrics/TestSystemMetricsPublisher.java |  15 +-
 .../TestSystemMetricsPublisherForV2.java|  22 ++-
 .../recovery/RMStateStoreTestBase.java  |   8 +-
 .../recovery/TestZKRMStateStore.java|  23 +--
 .../resourcemanager/webapp/TestAppPage.java |   8 +-
 .../webapp/TestRMWebAppFairScheduler.java   |   5 +-
 .../DefaultClientRequestInterceptor.java|  16 ++
 .../clientrm/FederationClientInterceptor.java   |  15 ++
 .../router/clientrm/RouterClientRMService.java  |  18 +++
 .../PassThroughClientRequestInterceptor.java|  16 ++
 41 files changed, 855 insertions(+), 254 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd1244ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
index 3cf8f3d..f9c8975 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationResourceUsageReport.java
@@ -24,6 +24,9 @@ import 
org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  * Contains various scheduling metrics to be reported by UI and CLI.
  */
@@ -35,9 +38,9 @@ public abstract class ApplicationResourceUsageReport {
   @Unstable
   public static ApplicationResourceUsageReport newInstance(
   int numUsedContainers, int numReservedContainers, Resource usedResources,
-  Resource reservedResources, Resource neededResources, long memorySeconds,
-  long vcoreSeconds, float queueUsagePerc, float clusterUsagePerc,
-  long preemptedMemorySeconds, long preemptedVcoresSeconds) {
+  Resource reservedResources, Resource 

[46/50] [abbrv] hadoop git commit: YARN-6788. [YARN-3926] Improve performance of resource profile branch (Contributed by Sunil Govindan via Daniel Templeton)

2017-08-16 Thread sunilg
YARN-6788. [YARN-3926] Improve performance of resource profile branch
(Contributed by Sunil Govindan via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f84812c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f84812c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f84812c5

Branch: refs/heads/YARN-3926
Commit: f84812c5f11bfc8520b2eb215f3b25d7a956c161
Parents: 3beeb43
Author: Daniel Templeton 
Authored: Fri Aug 4 08:42:34 2017 -0700
Committer: Sunil G 
Committed: Wed Aug 16 23:35:22 2017 +0530

--
 .../dev-support/findbugs-exclude.xml|  18 +
 .../yarn/api/records/ProfileCapability.java |   8 +-
 .../hadoop/yarn/api/records/Resource.java   | 234 
 .../yarn/api/records/ResourceInformation.java   |  13 +-
 .../yarn/api/records/impl/BaseResource.java | 133 +
 .../yarn/api/records/impl/package-info.java |  22 +
 .../hadoop/yarn/util/UnitsConversionUtil.java   |   8 +-
 .../yarn/util/resource/ResourceUtils.java   | 534 +++
 .../hadoop/yarn/util/resource/package-info.java |  22 +
 .../yarn/client/api/impl/TestAMRMClient.java|   8 +-
 .../yarn/api/records/impl/pb/ProtoUtils.java|   5 +-
 .../api/records/impl/pb/ResourcePBImpl.java | 110 ++--
 .../resource/DominantResourceCalculator.java|  67 ++-
 .../yarn/util/resource/ResourceUtils.java   | 488 -
 .../hadoop/yarn/util/resource/Resources.java| 194 ---
 .../yarn/util/resource/TestResourceUtils.java   |  14 +-
 .../yarn/util/resource/TestResources.java   |   7 +-
 .../resource/ResourceProfilesManagerImpl.java   |   8 +-
 .../rmapp/attempt/RMAppAttemptMetrics.java  |  11 +-
 .../scheduler/SchedulerApplicationAttempt.java  |   9 +-
 .../webapp/dao/SchedulerInfo.java   |   3 +-
 .../server/resourcemanager/TestAppManager.java  |   1 +
 22 files changed, 1045 insertions(+), 872 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f84812c5/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index ce7a9c6..a5b4021 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -603,4 +603,22 @@
 
   
 
+  
+
+
+
+
+  
+
+  
+
+
+
+  
+
+  
+
+
+
+  
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f84812c5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
index 1a8d1c3..2cb4670 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProfileCapability.java
@@ -162,10 +162,10 @@ public abstract class ProfileCapability {
 
 if (capability.getProfileCapabilityOverride() != null &&
 !capability.getProfileCapabilityOverride().equals(none)) {
-  for (Map.Entry entry : capability
-  .getProfileCapabilityOverride().getResources().entrySet()) {
-if (entry.getValue() != null && entry.getValue().getValue() >= 0) {
-  resource.setResourceInformation(entry.getKey(), entry.getValue());
+  for (ResourceInformation entry : capability
+  .getProfileCapabilityOverride().getResources()) {
+if (entry != null && entry.getValue() >= 0) {
+  resource.setResourceInformation(entry.getName(), entry);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f84812c5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 9a8e2ec..a485a57 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 

[36/50] [abbrv] hadoop git commit: YARN-5708. Implement APIs to get resource profiles from the RM. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-5708. Implement APIs to get resource profiles from the RM. Contributed by 
Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f3ac3fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f3ac3fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f3ac3fb

Branch: refs/heads/YARN-3926
Commit: 5f3ac3fb048d2316a76c045dcf5b595dca57095d
Parents: fd14142
Author: Varun Vasudev 
Authored: Sat Oct 22 20:15:47 2016 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:14:28 2017 +0530

--
 .../hadoop/mapred/ResourceMgrDelegate.java  |  13 ++
 .../hadoop/mapred/TestClientRedirect.java   |  17 +++
 .../yarn/api/ApplicationClientProtocol.java |  37 +
 .../GetAllResourceProfilesRequest.java  |  35 +
 .../GetAllResourceProfilesResponse.java |  60 
 .../GetResourceProfileRequest.java  |  59 
 .../GetResourceProfileResponse.java |  68 +
 .../yarn/api/records/ProfileCapability.java |  88 
 .../main/proto/applicationclient_protocol.proto |   2 +
 .../src/main/proto/yarn_protos.proto|  15 ++
 .../src/main/proto/yarn_service_protos.proto|  16 +++
 .../hadoop/yarn/client/api/YarnClient.java  |  25 
 .../yarn/client/api/impl/YarnClientImpl.java|  19 +++
 .../ApplicationClientProtocolPBClientImpl.java  |  36 +
 .../ApplicationClientProtocolPBServiceImpl.java |  42 ++
 .../pb/GetAllResourceProfilesRequestPBImpl.java |  55 +++
 .../GetAllResourceProfilesResponsePBImpl.java   | 142 +++
 .../pb/GetResourceProfileRequestPBImpl.java | 101 +
 .../pb/GetResourceProfileResponsePBImpl.java| 112 +++
 .../impl/pb/ProfileCapabilityPBImpl.java| 134 +
 .../hadoop/yarn/api/TestPBImplRecords.java  |  34 +
 .../yarn/server/MockResourceManagerFacade.java  |  16 +++
 .../server/resourcemanager/ClientRMService.java |  41 ++
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  12 ++
 .../server/resourcemanager/ResourceManager.java |   9 ++
 26 files changed, 1193 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f3ac3fb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 62aa497..a365f80 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
@@ -517,4 +518,16 @@ public class ResourceMgrDelegate extends YarnClient {
   throws YarnException, IOException {
 client.killApplication(appId, diagnostics);
   }
+
+  @Override
+  public Map getResourceProfiles()
+  throws YarnException, IOException {
+return client.getResourceProfiles();
+  }
+
+  @Override
+  public Resource getResourceProfile(String profile)
+  throws YarnException, IOException {
+return client.getResourceProfile(profile);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f3ac3fb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index 65eac65..cc50be0 100644
--- 

[19/50] [abbrv] hadoop git commit: HDFS-12054. FSNamesystem#addErasureCodingPolicies should call checkNameNodeSafeMode() to ensure Namenode is not in safemode. Contributed by lufei.

2017-08-16 Thread sunilg
HDFS-12054. FSNamesystem#addErasureCodingPolicies should call 
checkNameNodeSafeMode() to ensure Namenode is not in safemode. Contributed by 
lufei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1040bae6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1040bae6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1040bae6

Branch: refs/heads/YARN-3926
Commit: 1040bae6fcbae7079d8126368cdeac60831a4d0c
Parents: 2e43c28
Author: Wei-Chiu Chuang 
Authored: Tue Aug 15 07:38:43 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 15 07:38:43 2017 -0700

--
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  2 ++
 .../java/org/apache/hadoop/hdfs/TestSafeMode.java   | 16 
 2 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040bae6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b1639b2..caf73f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7081,6 +7081,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   checkOperation(OperationCategory.WRITE);
   for (ErasureCodingPolicy policy : policies) {
 try {
+  checkOperation(OperationCategory.WRITE);
+  checkNameNodeSafeMode("Cannot add erasure coding policy");
   ErasureCodingPolicy newPolicy =
   FSDirErasureCodingOp.addErasureCodePolicy(this, policy);
   addECPolicyName = newPolicy.getName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040bae6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index f03b440..bc95ec7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -466,6 +468,20 @@ public class TestSafeMode {
   // expected
 }
 
+ECSchema toAddSchema = new ECSchema("testcodec", 3, 2);
+ErasureCodingPolicy newPolicy =
+new ErasureCodingPolicy(toAddSchema, 128 * 1024);
+ErasureCodingPolicy[] policyArray =
+new ErasureCodingPolicy[]{newPolicy};
+try {
+  dfs.addErasureCodingPolicies(policyArray);
+  fail("AddErasureCodingPolicies should have failed.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(
+  "Cannot add erasure coding policy", ioe);
+  // expected
+}
+
 assertFalse("Could not leave SM",
 dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[29/50] [abbrv] hadoop git commit: YARN-5242. Update DominantResourceCalculator to consider all resource types in calculations. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-5242. Update DominantResourceCalculator to consider all resource types in 
calculations. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/551b1785
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/551b1785
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/551b1785

Branch: refs/heads/YARN-3926
Commit: 551b178530bbb59267475687a89c10d74b40f7f2
Parents: b3235c0
Author: Rohith Sharma K S 
Authored: Tue Jul 26 14:13:03 2016 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:13:02 2017 +0530

--
 .../hadoop/yarn/api/records/Resource.java   |  7 ++
 .../api/records/impl/pb/ResourcePBImpl.java |  2 +-
 .../resource/DominantResourceCalculator.java| 23 
 .../yarn/util/resource/ResourceUtils.java   |  5 +++--
 .../hadoop/yarn/util/resource/Resources.java|  6 +
 5 files changed, 31 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/551b1785/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index c9c6a7a..507247e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -327,6 +327,8 @@ public abstract class Resource implements 
Comparable {
 otherResources = other.getResources();
 long diff = thisResources.size() - otherResources.size();
 if (diff == 0) {
+  // compare memory and vcores first(in that order) to preserve
+  // existing behaviour
   if (thisResources.keySet().equals(otherResources.keySet())) {
 diff = this.getMemorySize() - other.getMemorySize();
 if (diff == 0) {
@@ -335,6 +337,11 @@ public abstract class Resource implements 
Comparable {
 if (diff == 0) {
   for (Map.Entry entry : thisResources
   .entrySet()) {
+if (entry.getKey().equals(ResourceInformation.MEMORY_MB.getName())
+|| entry.getKey()
+.equals(ResourceInformation.VCORES.getName())) {
+  continue;
+}
 diff =
 entry.getValue().compareTo(otherResources.get(entry.getKey()));
 if (diff != 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/551b1785/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index 86ae41f..b51121b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -242,7 +242,7 @@ public class ResourcePBImpl extends Resource {
 builder.addResourceValueMap(e);
   }
 }
-builder.setMemory(this.getMemory());
+builder.setMemory(this.getMemorySize());
 builder.setVirtualCores(this.getVirtualCores());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/551b1785/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 0412c0f..3c4413c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -397,10 +397,25 @@ public class DominantResourceCalculator extends 

[42/50] [abbrv] hadoop git commit: YARN-6786. [YARN-3926] ResourcePBImpl imports cleanup. Contributed by Yeliang Cang.

2017-08-16 Thread sunilg
YARN-6786. [YARN-3926] ResourcePBImpl imports cleanup. Contributed by Yeliang 
Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3beeb43f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3beeb43f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3beeb43f

Branch: refs/heads/YARN-3926
Commit: 3beeb43fa8da2d4736e90690d8e6d7dc76fc6f62
Parents: 6265087
Author: Sunil G 
Authored: Thu Jul 13 16:30:59 2017 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:27:42 2017 +0530

--
 .../apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3beeb43f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
index a9abed9..7bc7f5f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.api.records.impl.pb;
 
-import org.apache.commons.collections.map.UnmodifiableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -34,7 +33,10 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 
-import java.util.*;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Collections;
+
 
 @Private
 @Unstable


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[31/50] [abbrv] hadoop git commit: YARN-5707. Add manager class for resource profiles. Contributed by Varun Vasudev.

2017-08-16 Thread sunilg
YARN-5707. Add manager class for resource profiles. Contributed by Varun 
Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd141426
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd141426
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd141426

Branch: refs/heads/YARN-3926
Commit: fd141426c85d165a920c7cf859f63297c6699133
Parents: 8ce0621
Author: Varun Vasudev 
Authored: Sat Oct 8 19:43:33 2016 +0530
Committer: Sunil G 
Committed: Wed Aug 16 23:13:02 2017 +0530

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  23 +++
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../src/main/resources/yarn-default.xml |  16 ++
 .../hadoop-yarn-server-resourcemanager/pom.xml  |   5 +
 .../resource/ResourceProfilesManager.java   |  46 +
 .../resource/ResourceProfilesManagerImpl.java   | 176 +++
 .../resource/TestResourceProfiles.java  | 142 +++
 .../resources/profiles/illegal-profiles-1.json  |  10 ++
 .../resources/profiles/illegal-profiles-2.json  |  10 ++
 .../resources/profiles/illegal-profiles-3.json  |  10 ++
 .../resources/profiles/sample-profiles-1.json   |  14 ++
 .../resources/profiles/sample-profiles-2.json   |  26 +++
 12 files changed, 482 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd141426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e809b7d..d29235c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -875,6 +875,29 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser.";
 
   /**
+   * Enable/disable resource profiles.
+   */
+  @Public
+  @Unstable
+  public static final String RM_RESOURCE_PROFILES_ENABLED =
+  RM_PREFIX + "resource-profiles.enabled";
+  @Public
+  @Unstable
+  public static final boolean DEFAULT_RM_RESOURCE_PROFILES_ENABLED = false;
+
+  /**
+   * File containing resource profiles.
+   */
+  @Public
+  @Unstable
+  public static final String RM_RESOURCE_PROFILES_SOURCE_FILE =
+  RM_PREFIX + "resource-profiles.source-file";
+  @Public
+  @Unstable
+  public static final String DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE =
+  "resource-profiles.json";
+
+  /**
* Timeout in seconds for YARN node graceful decommission.
* This is the maximal time to wait for running containers and applications
* to complete before transition a DECOMMISSIONING node into DECOMMISSIONED.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd141426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 91a8b0a..75b75cc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -135,6 +135,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 // Used as Java command line properties, not XML
 configurationPrefixToSkipCompare.add("yarn.app.container");
 
+// Ignore default file name for resource profiles
+configurationPropsToSkipCompare
+.add(YarnConfiguration.DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE);
+
 // Ignore NodeManager "work in progress" variables
 configurationPrefixToSkipCompare
 .add(YarnConfiguration.NM_NETWORK_RESOURCE_ENABLED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd141426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 

[09/50] [abbrv] hadoop git commit: YARN-6905 Multiple HBaseTimelineStorage test failures due to missing FastNumberFormat (Contributed by Haibo Chen)

2017-08-16 Thread sunilg
YARN-6905 Multiple HBaseTimelineStorage test failures due to missing 
FastNumberFormat (Contributed by Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/608a06cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/608a06cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/608a06cc

Branch: refs/heads/YARN-3926
Commit: 608a06cca5d68b3155bd70a94bf29ae0942b9ca0
Parents: d72124a
Author: Vrushali C 
Authored: Mon Aug 14 11:40:27 2017 -0700
Committer: Vrushali C 
Committed: Mon Aug 14 11:41:11 2017 -0700

--
 .../storage/TestHBaseTimelineStorageApps.java   |  4 +-
 .../TestHBaseTimelineStorageEntities.java   | 14 ---
 .../storage/common/AppIdKeyConverter.java   |  3 +-
 .../common/HBaseTimelineStorageUtils.java   | 33 +
 .../TestCustomApplicationIdConversion.java  | 39 
 5 files changed, 86 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/608a06cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index b3e5197..3948d23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -69,6 +69,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.Applica
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
@@ -493,7 +494,8 @@ public class TestHBaseTimelineStorageApps {
 event.addInfo(expKey, expVal);
 
 final TimelineEntity entity = new ApplicationEntity();
-entity.setId(ApplicationId.newInstance(0, 1).toString());
+entity.setId(HBaseTimelineStorageUtils.convertApplicationIdToString(
+ApplicationId.newInstance(0, 1)));
 entity.addEvent(event);
 
 TimelineEntities entities = new TimelineEntities();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608a06cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 4b4c3e1..e18d0d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -62,6 +62,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefi
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;

[04/50] [abbrv] hadoop git commit: YARN-6741. Deleting all children of a Parent Queue on refresh throws exception. Contributed by Naganarasimha G R.

2017-08-16 Thread sunilg
YARN-6741. Deleting all children of a Parent Queue on refresh throws exception. 
Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8f74c39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8f74c39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8f74c39

Branch: refs/heads/YARN-3926
Commit: d8f74c3964fa429a4a53c3651d175792cf00ac81
Parents: 7769e96
Author: bibinchundatt 
Authored: Mon Aug 14 09:39:00 2017 +0530
Committer: bibinchundatt 
Committed: Mon Aug 14 09:39:00 2017 +0530

--
 .../capacity/CapacitySchedulerQueueManager.java |   4 +
 .../scheduler/capacity/ParentQueue.java |  39 +++
 .../capacity/TestCapacityScheduler.java | 114 ++-
 3 files changed, 137 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f74c39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index e33fbb3..1ceb6fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -327,6 +327,10 @@ public class CapacitySchedulerQueueManager implements 
SchedulerQueueManager<
 + "it is not yet in stopped state. Current State : "
 + oldQueue.getState());
   }
+} else if (oldQueue instanceof ParentQueue
+&& newQueue instanceof LeafQueue) {
+  LOG.info("Converting the parent queue: " + oldQueue.getQueuePath()
+  + " to leaf queue.");
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f74c39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index f6ada4f..e0baa07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -18,6 +18,14 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,7 +42,6 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.AccessType;
@@ -45,7 +52,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import 

[18/50] [abbrv] hadoop git commit: HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin.

2017-08-16 Thread sunilg
HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e43c28e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e43c28e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e43c28e

Branch: refs/heads/YARN-3926
Commit: 2e43c28e01fe006210e71aab179527669f6412ed
Parents: 645a8f2
Author: Yiqun Lin 
Authored: Tue Aug 15 16:48:49 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Aug 15 16:48:49 2017 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 +++--
 .../hdfs/server/protocol/SlowDiskReports.java   |  5 ++--
 .../dev-support/findbugsExcludeFile.xml | 26 +++
 .../hdfs/qjournal/server/JournalNode.java   | 16 +++-
 .../hdfs/server/datanode/DataStorage.java   | 12 ++---
 .../namenode/NNStorageRetentionManager.java | 27 +++-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  6 ++---
 .../offlineImageViewer/ImageLoaderCurrent.java  | 10 +---
 .../namenode/TestNameNodeOptionParsing.java | 27 +++-
 9 files changed, 103 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 677ea35..88b273a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2901,9 +2901,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
 synchronized (DFSClient.class) {
   if (STRIPED_READ_THREAD_POOL == null) {
-STRIPED_READ_THREAD_POOL = DFSUtilClient.getThreadPoolExecutor(1,
+// Only after thread pool is fully constructed then save it to
+// volatile field.
+ThreadPoolExecutor threadPool = DFSUtilClient.getThreadPoolExecutor(1,
 numThreads, 60, "StripedRead-", true);
-STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
+threadPool.allowCoreThreadTimeOut(true);
+STRIPED_READ_THREAD_POOL = threadPool;
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index 8095c2a..496389a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -101,8 +101,9 @@ public final class SlowDiskReports {
 }
 
 boolean areEqual;
-for (String disk : this.slowDisks.keySet()) {
-  if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
+for (Map.Entry> entry : this.slowDisks
+.entrySet()) {
  if (!entry.getValue().equals(that.slowDisks.get(entry.getKey()))) {
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 2a7824a..9582fcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -264,4 +264,30 @@
 
 
 
+
+   
+   
+   
+ 
+ 
+ 
+   
+   
+   
+ 
+ 
+   
+   
+   
+ 
+ 
+   
+   
+   
+ 
+ 
+  
+  
+  
+
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
--
diff --git 

[07/50] [abbrv] hadoop git commit: YARN-6996. Change javax.cache library implementation from JSR107 to Apache Geronimo. (Ray Chiang via Subru).

2017-08-16 Thread sunilg
YARN-6996. Change javax.cache library implementation from JSR107 to Apache 
Geronimo. (Ray Chiang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18f3603b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18f3603b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18f3603b

Branch: refs/heads/YARN-3926
Commit: 18f3603bce37e0e07c9075811b1179afc2c227eb
Parents: e2f6299
Author: Subru Krishnan 
Authored: Mon Aug 14 11:10:00 2017 -0700
Committer: Subru Krishnan 
Committed: Mon Aug 14 11:10:00 2017 -0700

--
 hadoop-project/pom.xml | 6 +++---
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml   | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f3603b/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6311cd9..8c1d374 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -96,7 +96,7 @@
 2.0.0-M21
 1.0.0-M33
 
-1.0.0
+1.0-alpha-1
 3.3.1
 2.4.12
 6.2.1.jre7
@@ -1276,8 +1276,8 @@
   1.0.0
 
 
-  javax.cache
-  cache-api
+  org.apache.geronimo.specs
+  geronimo-jcache_1.0_spec
   ${jcache.version}
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f3603b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 5f85097..441a574 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -103,8 +103,8 @@
   leveldbjni-all
 
 
-  javax.cache
-  cache-api
+  org.apache.geronimo.specs
+  geronimo-jcache_1.0_spec
 
 
   org.ehcache


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/50] [abbrv] hadoop git commit: YARN-6987. Log app attempt during InvalidStateTransition. Contributed by Jonathan Eagles

2017-08-16 Thread sunilg
YARN-6987. Log app attempt during InvalidStateTransition. Contributed by 
Jonathan Eagles


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3325ef65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3325ef65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3325ef65

Branch: refs/heads/YARN-3926
Commit: 3325ef653d6f364a82dd32485d9ef6d987380ce3
Parents: 6b09c32
Author: Jason Lowe 
Authored: Mon Aug 14 14:40:08 2017 -0500
Committer: Jason Lowe 
Committed: Mon Aug 14 14:40:08 2017 -0500

--
 .../hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java   | 3 ++-
 .../server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java| 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3325ef65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index fa2f20c..03be793 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -888,7 +888,8 @@ public class RMAppImpl implements RMApp, Recoverable {
 /* keep the master in sync with the state machine */
 this.stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.error("Can't handle this event at current state", e);
+LOG.error("App: " + appID
++ " can't handle this event at current state", e);
 /* TODO fail the application on the failed transition */
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3325ef65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 254768b..7d453bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -911,7 +911,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 /* keep the master in sync with the state machine */
 this.stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.error("Can't handle this event at current state", e);
+LOG.error("App attempt: " + appAttemptID
++ " can't handle this event at current state", e);
 /* TODO fail the application on the failed transition */
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: HADOOP-14726. Mark FileStatus::isDir as final

2017-08-16 Thread sunilg
HADOOP-14726. Mark FileStatus::isDir as final


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/645a8f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/645a8f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/645a8f2a

Branch: refs/heads/YARN-3926
Commit: 645a8f2a4d09acb5a21820f52ee78784d9e4cc8a
Parents: 4d7be1d
Author: Chris Douglas 
Authored: Mon Aug 14 21:57:20 2017 -0700
Committer: Chris Douglas 
Committed: Mon Aug 14 21:57:20 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileStatus.java| 19 +--
 .../hadoop/fs/viewfs/ViewFsFileStatus.java   |  8 +---
 .../fs/viewfs/ViewFsLocatedFileStatus.java   |  6 --
 .../hadoop/hdfs/protocolPB/PBHelperClient.java   |  2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java   |  6 --
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java |  8 
 .../apache/hadoop/hdfs/server/mover/Mover.java   |  2 +-
 .../hdfs/server/namenode/NamenodeFsck.java   |  4 ++--
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java |  3 +--
 .../hdfs/server/mover/TestStorageMover.java  |  2 +-
 .../hadoop/hdfs/server/namenode/TestStartup.java |  4 ++--
 .../server/namenode/ha/TestEditLogTailer.java|  4 ++--
 .../namenode/ha/TestFailureToReadEdits.java  |  6 +++---
 .../namenode/ha/TestInitializeSharedEdits.java   |  2 +-
 .../lib/input/TestCombineFileInputFormat.java|  2 +-
 .../azure/TestOutOfBandAzureBlobOperations.java  |  8 
 .../hadoop/fs/swift/snative/SwiftFileStatus.java | 16 
 .../snative/SwiftNativeFileSystemStore.java  |  4 ++--
 .../fs/swift/TestSwiftFileSystemDirectories.java |  4 ++--
 .../TestSwiftFileSystemPartitionedUploads.java   |  2 +-
 20 files changed, 46 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/645a8f2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 2f22ea0..8575439 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -172,7 +172,7 @@ public class FileStatus implements Writable, 
Comparable,
* @return true if this is a file
*/
   public boolean isFile() {
-return !isdir && !isSymlink();
+return !isDirectory() && !isSymlink();
   }
 
   /**
@@ -182,20 +182,20 @@ public class FileStatus implements Writable, 
Comparable,
   public boolean isDirectory() {
 return isdir;
   }
-  
+
   /**
-   * Old interface, instead use the explicit {@link FileStatus#isFile()}, 
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * Old interface, instead use the explicit {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* @return true if this is a directory.
-   * @deprecated Use {@link FileStatus#isFile()},  
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * @deprecated Use {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* instead.
*/
   @Deprecated
-  public boolean isDir() {
-return isdir;
+  public final boolean isDir() {
+return isDirectory();
   }
-  
+
   /**
* Is this a symbolic link?
* @return true if this is a symbolic link
@@ -448,7 +448,6 @@ public class FileStatus implements Writable, 
Comparable,
 FileStatus other = PBHelper.convert(proto);
 isdir = other.isDirectory();
 length = other.getLen();
-isdir = other.isDirectory();
 block_replication = other.getReplication();
 blocksize = other.getBlockSize();
 modification_time = other.getModificationTime();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/645a8f2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
index e0f62e4..ce03ced 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
@@ -61,13 +61,7 @@ class ViewFsFileStatus extends FileStatus {
 

[15/50] [abbrv] hadoop git commit: YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType update. (Kartheek Muthyala via asuresh)

2017-08-16 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7be1d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index aeba399..a1c247b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -27,6 +27,8 @@ import java.util.List;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
@@ -37,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ConfigurationException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -951,4 +954,97 @@ public class TestContainerSchedulerQueuing extends 
BaseContainerManagerTest {
 map.get(org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED)
 .getContainerId());
   }
+
+  /**
+   * Starts one OPPORTUNISTIC container that takes up the whole node's
+   * resources, and submit one more that will be queued. Now promote the
+   * queued OPPORTUNISTIC container, which should kill the current running
+   * OPPORTUNISTIC container to make room for the promoted request.
+   * @throws Exception
+   */
+  @Test
+  public void testPromotionOfOpportunisticContainers() throws Exception {
+containerManager.start();
+
+ContainerLaunchContext containerLaunchContext =
+recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+List list = new ArrayList<>();
+list.add(StartContainerRequest.newInstance(
+containerLaunchContext,
+createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
+context.getNodeId(),
+user, BuilderUtils.newResource(2048, 1),
+context.getContainerTokenSecretManager(), null,
+ExecutionType.OPPORTUNISTIC)));
+list.add(StartContainerRequest.newInstance(
+containerLaunchContext,
+createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
+context.getNodeId(),
+user, BuilderUtils.newResource(1024, 1),
+context.getContainerTokenSecretManager(), null,
+ExecutionType.OPPORTUNISTIC)));
+
+StartContainersRequest allRequests =
+StartContainersRequest.newInstance(list);
+containerManager.startContainers(allRequests);
+
+Thread.sleep(5000);
+
+// Ensure first container is running and others are queued.
+List statList = new ArrayList();
+for (int i = 0; i < 3; i++) {
+  statList.add(createContainerId(i));
+}
+GetContainerStatusesRequest statRequest = GetContainerStatusesRequest
+.newInstance(Arrays.asList(createContainerId(0)));
+List containerStatuses = containerManager
+.getContainerStatuses(statRequest).getContainerStatuses();
+for (ContainerStatus status : containerStatuses) {
+  if (status.getContainerId().equals(createContainerId(0))) {
+Assert.assertEquals(
+org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
+status.getState());
+  } else {
+Assert.assertEquals(
+org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED,
+status.getState());
+  }
+}
+
+ContainerScheduler containerScheduler =
+containerManager.getContainerScheduler();
+// Ensure two containers are properly queued.
+

[25/50] [abbrv] hadoop git commit: YARN-6965. Duplicate instantiation in FairSchedulerQueueInfo. Contributed by Masahiro Tanaka.

2017-08-16 Thread sunilg
YARN-6965. Duplicate instantiation in FairSchedulerQueueInfo. Contributed by 
Masahiro Tanaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/588c190a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/588c190a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/588c190a

Branch: refs/heads/YARN-3926
Commit: 588c190afd49bdbd5708f7805bf6c68f09fee142
Parents: 75dd866
Author: Akira Ajisaka 
Authored: Wed Aug 16 14:06:22 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 16 14:06:22 2017 +0900

--
 .../server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java   | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/588c190a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index a4607c2..79339c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -99,7 +99,6 @@ public class FairSchedulerQueueInfo {
 steadyFairResources = new ResourceInfo(queue.getSteadyFairShare());
 fairResources = new ResourceInfo(queue.getFairShare());
 minResources = new ResourceInfo(queue.getMinShare());
-maxResources = new ResourceInfo(queue.getMaxShare());
 maxResources = new ResourceInfo(
 Resources.componentwiseMin(queue.getMaxShare(),
 scheduler.getClusterResource()));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: HDFS-12221. Replace xcerces in XmlEditsVisitor. (Ajay Kumar via lei)

2017-08-16 Thread sunilg
HDFS-12221. Replace xcerces in XmlEditsVisitor. (Ajay Kumar via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce797a17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce797a17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce797a17

Branch: refs/heads/YARN-3926
Commit: ce797a170669524224cfeaaf70647047e7626816
Parents: d8f74c3
Author: Lei Xu 
Authored: Mon Aug 14 10:27:47 2017 -0700
Committer: Lei Xu 
Committed: Mon Aug 14 10:27:47 2017 -0700

--
 .../hadoop-client-minicluster/pom.xml   |   6 --
 .../hadoop-client-runtime/pom.xml   |   7 ---
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   5 --
 .../offlineEditsViewer/XmlEditsVisitor.java |  41 
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 5850 -> 5850 bytes
 .../src/test/resources/editsStored.xml  |  62 +--
 .../hadoop-mapreduce-client/pom.xml |  10 +--
 hadoop-project-dist/pom.xml |  10 +--
 hadoop-project/pom.xml  |   8 ---
 hadoop-yarn-project/hadoop-yarn/pom.xml |  10 +--
 10 files changed, 62 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 5255640..5cf1fad 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -629,12 +629,6 @@
   
 
 
-  xerces:xercesImpl
-  
-**/*
-  
-
-
   
org.apache.hadoop:hadoop-mapreduce-client-jobclient:*
   
 testjar/*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 2f64152..24c6b7a 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -174,13 +174,6 @@
 
org/apache/jasper/compiler/Localizer.class
   
 
-
-
-  xerces:xercesImpl
-  
-META-INF/services/*
-  
-
 
 
   com.sun.jersey:*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1c50d31..fa1044d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -174,11 +174,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   compile
 
 
-  xerces
-  xercesImpl
-  compile
-
-
   org.apache.htrace
   htrace-core4
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
index 7a39ba6..ddf7933 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
@@ -20,17 +20,21 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.sax.SAXTransformerFactory;
+import javax.xml.transform.sax.TransformerHandler;
+import javax.xml.transform.stream.StreamResult;
+import org.xml.sax.ContentHandler;
+import org.xml.sax.SAXException;
+import 

[02/50] [abbrv] hadoop git commit: HDFS-11303. Hedged read might hang infinitely if read data from all DN failed . Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.

2017-08-16 Thread sunilg
HDFS-11303. Hedged read might hang infinitely if read data from all DN failed. 
Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b242f09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b242f09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b242f09

Branch: refs/heads/YARN-3926
Commit: 8b242f09a61a7536d2422546bfa6c2aaf1d57ed6
Parents: 28d97b7
Author: John Zhuge 
Authored: Thu Aug 10 14:04:36 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 19:42:07 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 11 ++--
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 63 
 2 files changed, 70 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b242f09/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index dcc997c..6bff172 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1131,8 +1131,9 @@ public class DFSInputStream extends FSInputStream
 Future firstRequest = hedgedService
 .submit(getFromDataNodeCallable);
 futures.add(firstRequest);
+Future future = null;
 try {
-  Future future = hedgedService.poll(
+  future = hedgedService.poll(
   conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
   if (future != null) {
 ByteBuffer result = future.get();
@@ -1142,16 +1143,18 @@ public class DFSInputStream extends FSInputStream
   }
   DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
   + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
-  // Ignore this node on next go around.
-  ignored.add(chosenNode.info);
   dfsClient.getHedgedReadMetrics().incHedgedReadOps();
   // continue; no need to refresh block locations
 } catch (ExecutionException e) {
-  // Ignore
+  futures.remove(future);
 } catch (InterruptedException e) {
   throw new InterruptedIOException(
   "Interrupted while waiting for reading task");
 }
+// Ignore this node on next go around.
+// If poll timeout and the request still ongoing, don't consider it
+// again. If read data failed, don't consider it either.
+ignored.add(chosenNode.info);
   } else {
 // We are starting up a 'hedged' read. We have a read already
 // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b242f09/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 85fc97b..bcb02b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -59,6 +59,8 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Supplier;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
 
 /**
  * This class tests the DFS positional read functionality in a single node
@@ -72,6 +74,9 @@ public class TestPread {
   boolean simulatedStorage;
   boolean isHedgedRead;
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestPread.class.getName());
+
   @Before
   public void setup() {
 simulatedStorage = false;
@@ -551,6 +556,64 @@ public class TestPread {
 }
   }
 
+  @Test(timeout=3)
+  public void testHedgedReadFromAllDNFailed() throws IOException {
+Configuration conf = new Configuration();
+int numHedgedReadPoolThreads = 5;
+final int hedgedReadTimeoutMillis = 50;
+
+conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
+numHedgedReadPoolThreads);
+conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
+hedgedReadTimeoutMillis);
+conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 

[23/50] [abbrv] hadoop git commit: HDFS-12301. NN File Browser UI: Navigate to a path when enter is pressed

2017-08-16 Thread sunilg
HDFS-12301. NN File Browser UI: Navigate to a path when enter is pressed


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f34646d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f34646d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f34646d6

Branch: refs/heads/YARN-3926
Commit: f34646d652310442cb5339aa269f10dfa838
Parents: d265459
Author: Ravi Prakash 
Authored: Tue Aug 15 15:44:59 2017 -0700
Committer: Ravi Prakash 
Committed: Tue Aug 15 15:44:59 2017 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f34646d6/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 3e276a9..dae3519 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -370,6 +370,12 @@
 
 var b = function() { browse_directory($('#directory').val()); };
 $('#btn-nav-directory').click(b);
+//Also navigate to the directory when a user presses enter.
+$('#directory').on('keyup', function (e) {
+  if (e.which == 13) {
+browse_directory($('#directory').val());
+  }
+});
 var dir = window.location.hash.slice(1);
 if(dir == "") {
   window.location.hash = "/";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2017-08-16 Thread sunilg
YARN-4081. Add support for multiple resource types in the Resource class. 
(Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a180be4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a180be4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a180be4

Branch: refs/heads/YARN-3926
Commit: 6a180be45a4d401206f582be28e1afb33cb93b74
Parents: 588c190
Author: Wangda Tan 
Authored: Thu Sep 10 09:43:26 2015 -0700
Committer: Sunil G 
Committed: Wed Aug 16 23:12:16 2017 +0530

--
 .../src/main/resources/META-INF/LICENSE.txt | 1661 ++
 .../src/main/resources/META-INF/NOTICE.txt  |  283 +++
 .../yarn/api/protocolrecords/ResourceTypes.java |   27 +
 .../hadoop/yarn/api/records/Resource.java   |  205 ++-
 .../yarn/api/records/ResourceInformation.java   |  218 +++
 .../exceptions/ResourceNotFoundException.java   |   45 +
 .../hadoop/yarn/util/UnitsConversionUtil.java   |  197 +++
 .../src/main/proto/yarn_protos.proto|   12 +
 .../yarn/conf/TestResourceInformation.java  |   70 +
 .../yarn/util/TestUnitsConversionUtil.java  |  120 ++
 .../yarn/api/records/impl/pb/ProtoUtils.java|   13 +
 .../api/records/impl/pb/ResourcePBImpl.java |  193 +-
 .../hadoop/yarn/util/resource/Resources.java|  137 +-
 .../hadoop/yarn/api/TestPBImplRecords.java  |4 +
 14 files changed, 3104 insertions(+), 81 deletions(-)
--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/50] [abbrv] hadoop git commit: YARN-6959. RM may allocate wrong AM Container for new attempt. Contributed by Yuqi Wang

2017-08-16 Thread sunilg
YARN-6959. RM may allocate wrong AM Container for new attempt. Contributed by 
Yuqi Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2f6299f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2f6299f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2f6299f

Branch: refs/heads/YARN-3926
Commit: e2f6299f6f580d7a03f2377d19ac85f55fd4e73b
Parents: ce797a1
Author: Jian He 
Authored: Mon Aug 14 10:51:04 2017 -0700
Committer: Jian He 
Committed: Mon Aug 14 10:51:30 2017 -0700

--
 .../scheduler/AbstractYarnScheduler.java|  1 +
 .../scheduler/capacity/CapacityScheduler.java   | 13 ++
 .../scheduler/fair/FairScheduler.java   | 15 ++-
 .../scheduler/fifo/FifoScheduler.java   | 15 ++-
 .../scheduler/fair/TestFairScheduler.java   | 46 ++--
 5 files changed, 63 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index d506f4d..79caab0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -323,6 +323,7 @@ public abstract class AbstractYarnScheduler
 
   }
 
+  // TODO: Rename it to getCurrentApplicationAttempt
   public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) {
 SchedulerApplication app = applications.get(
 applicationAttemptId.getApplicationId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 3286982..e4ca003 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -903,6 +903,19 @@ public class CapacityScheduler extends
   ContainerUpdates updateRequests) {
 FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
 if (application == null) {
+  LOG.error("Calling allocate on removed or non existent application " +
+  applicationAttemptId.getApplicationId());
+  return EMPTY_ALLOCATION;
+}
+
+// The allocate may be the leftover from previous attempt, and it will
+// impact current attempt, such as confuse the request and allocation for
+// current attempt's AM container.
+// Note outside precondition check for the attempt id may be
+// outdated here, so double check it here is necessary.
+if (!application.getApplicationAttemptId().equals(applicationAttemptId)) {
+  LOG.error("Calling allocate on previous or removed " +
+  "or non existent application attempt " + applicationAttemptId);
   return EMPTY_ALLOCATION;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 

[13/50] [abbrv] hadoop git commit: HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure durations. Contributed by Hanisha Koneru.

2017-08-16 Thread sunilg
HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure 
durations. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bef4eca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bef4eca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bef4eca

Branch: refs/heads/YARN-3926
Commit: 8bef4eca28a3466707cc4ea0de0330449319a5eb
Parents: 5558792
Author: Arpit Agarwal 
Authored: Mon Aug 14 15:53:35 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 14 15:53:35 2017 -0700

--
 .../java/org/apache/hadoop/ipc/ProtobufRpcEngine.java | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bef4eca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 639bbad..2c0cfe5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -190,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = Time.now();
+startTime = Time.monotonicNow();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -245,7 +245,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = Time.now() - startTime;
+long callTime = Time.monotonicNow() - startTime;
 LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
   }
   
@@ -373,19 +373,19 @@ public class ProtobufRpcEngine implements RpcEngine {
 this.server = currentCallInfo.get().server;
 this.call = Server.getCurCall().get();
 this.methodName = currentCallInfo.get().methodName;
-this.setupTime = Time.now();
+this.setupTime = Time.monotonicNow();
   }
 
   @Override
   public void setResponse(Message message) {
-long processingTime = Time.now() - setupTime;
+long processingTime = Time.monotonicNow() - setupTime;
 call.setDeferredResponse(RpcWritable.wrap(message));
 server.updateDeferredMetrics(methodName, processingTime);
   }
 
   @Override
   public void error(Throwable t) {
-long processingTime = Time.now() - setupTime;
+long processingTime = Time.monotonicNow() - setupTime;
 String detailedMetricsName = t.getClass().getSimpleName();
 server.updateDeferredMetrics(detailedMetricsName, processingTime);
 call.setDeferredError(t);
@@ -513,7 +513,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 Message param = request.getValue(prototype);
 
 Message result;
-long startTime = Time.now();
+long startTime = Time.monotonicNow();
 int qTime = (int) (startTime - receiveTime);
 Exception exception = null;
 boolean isDeferred = false;
@@ -537,7 +537,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   throw e;
 } finally {
   currentCallInfo.set(null);
-  int processingTime = (int) (Time.now() - startTime);
+  int processingTime = (int) (Time.monotonicNow() - startTime);
   if (LOG.isDebugEnabled()) {
 String msg =
 "Served: " + methodName + (isDeferred ? ", deferred" : "") +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[14/50] [abbrv] hadoop git commit: HADOOP-14673. Remove leftover hadoop_xml_escape from functions. Contributed by Ajay Kumar.

2017-08-16 Thread sunilg
HADOOP-14673. Remove leftover hadoop_xml_escape from functions. Contributed by 
Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04465113
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04465113
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04465113

Branch: refs/heads/YARN-3926
Commit: 044651139800b9e2e5b8f224772e6dbd6ded58c6
Parents: 8bef4ec
Author: Arpit Agarwal 
Authored: Mon Aug 14 16:22:10 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 14 16:22:10 2017 -0700

--
 .../src/main/bin/hadoop-functions.sh| 23 --
 .../src/test/scripts/hadoop_escape_chars.bats   | 32 
 2 files changed, 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04465113/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 3cf21cf..9ea4587 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -2578,29 +2578,6 @@ function hadoop_parse_args
   hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
 }
 
-## @description  XML-escapes the characters (&'"<>) in the given parameter.
-## @audience private
-## @stabilityevolving
-## @replaceable  yes
-## @paramstring
-## @return   XML-escaped string
-function hadoop_xml_escape
-{
-  sed -e 's/&/\&amp;/g' -e 's/"/\\\&quot;/g' \
--e "s/'/\&apos;/g" -e 's/</\\\&lt;/g' -e 's/>/\\\&gt;/g' <<< "$1"
-}
-
-## @description  sed-escapes the characters (\/&) in the given parameter.
-## @audience private
-## @stabilityevolving
-## @replaceable  yes
-## @paramstring
-## @return   sed-escaped string
-function hadoop_sed_escape
-{
-  sed -e 's/[\/&]/\\&/g' <<< "$1"
-}
-
 ## @description Handle subcommands from main program entries
 ## @audience private
 ## @stability evolving

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04465113/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
deleted file mode 100755
index 9b031f2..000
--- 
a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load hadoop-functions_test_helper
-
-@test "hadoop_escape_sed (positive 1)" {
-  ret="$(hadoop_sed_escape "\pass&\0#\$asdf/g  ><'\"~\`!@#$%^&*()_+-=")"
-  expected="pass\&0#\$asdf\/g  ><'\"~\`!@#$%^\&*()_+-="
-  echo "actual >${ret}<"
-  echo "expected >${expected}<"
-  [ "${ret}" = "${expected}" ]
-}
-
-@test "hadoop_escape_xml (positive 1)" {
-  ret="$(hadoop_xml_escape "\pass&\0#\$asdf/g  ><'\"~\`!@#$%^&*()_+-=")"
-  expected="\\password\0#\$asdf/g  
~\`!@#\$%^*()_+-="
-  echo "actual >${ret}<"
-  echo "expected >${expected}<"
-  [ "${ret}" = "${expected}" ]
-}
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] [abbrv] hadoop git commit: HADOOP-14627. Support MSI and DeviceCode token provider in ADLS. Contributed by Atul Sikaria.

2017-08-16 Thread sunilg
HADOOP-14627. Support MSI and DeviceCode token provider in ADLS. Contributed by 
Atul Sikaria.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7769e961
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7769e961
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7769e961

Branch: refs/heads/YARN-3926
Commit: 7769e9614956283a86eda9e4e69aaa592c0ca960
Parents: 8b242f0
Author: John Zhuge 
Authored: Thu Aug 10 00:43:40 2017 -0700
Committer: John Zhuge 
Committed: Sun Aug 13 00:22:34 2017 -0700

--
 .../src/main/resources/core-default.xml | 37 +++-
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  2 +-
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  8 ++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 21 +
 .../apache/hadoop/fs/adl/TokenProviderType.java |  2 +
 .../src/site/markdown/index.md  | 98 ++--
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 40 
 7 files changed, 198 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ffcab2c..7c4b0f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2586,11 +2586,16 @@
     <value>ClientCredential</value>
     <description>
       Defines Azure Active Directory OAuth2 access token provider type.
-      Supported types are ClientCredential, RefreshToken, and Custom.
+      Supported types are ClientCredential, RefreshToken, MSI, DeviceCode,
+      and Custom.
       The ClientCredential type requires property fs.adl.oauth2.client.id,
       fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url.
       The RefreshToken type requires property fs.adl.oauth2.client.id and
       fs.adl.oauth2.refresh.token.
+      The MSI type requires properties fs.adl.oauth2.msi.port and
+      fs.adl.oauth2.msi.tenantguid.
+      The DeviceCode type requires property
+      fs.adl.oauth2.devicecode.clientapp.id.
       The Custom type requires property fs.adl.oauth2.access.token.provider.
     </description>
   </property>
@@ -2627,6 +2632,36 @@
 
   
 
+  <property>
+    <name>fs.adl.oauth2.msi.port</name>
+    <value></value>
+    <description>
+      The localhost port for the MSI token service. This is the port specified
+      when creating the Azure VM.
+      Used by MSI token provider.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.adl.oauth2.msi.tenantguid</name>
+    <value></value>
+    <description>
+      The tenant guid for the Azure AAD tenant under which the azure data lake
+      store account is created.
+      Used by MSI token provider.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.adl.oauth2.devicecode.clientapp.id</name>
+    <value></value>
+    <description>
+      The app id of the AAD native app in whose context the auth request
+      should be made.
+      Used by DeviceCode token provider.
+    </description>
+  </property>
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 3aed5e1..47f12df 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -110,7 +110,7 @@
 
       <groupId>com.microsoft.azure</groupId>
       <artifactId>azure-data-lake-store-sdk</artifactId>
-      <version>2.1.4</version>
+      <version>2.2.1</version>
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 31df222..f77d981 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -54,6 +54,14 @@ public final class AdlConfKeys {
   public static final String TOKEN_PROVIDER_TYPE_CLIENT_CRED =
   "ClientCredential";
 
+  // MSI Auth Configuration
+  public static final String MSI_PORT = "fs.adl.oauth2.msi.port";
+  public static final String MSI_TENANT_GUID = "fs.adl.oauth2.msi.tenantguid";
+
+  // DeviceCode Auth configuration
+  public static final String DEVICE_CODE_CLIENT_APP_ID =
+  "fs.adl.oauth2.devicecode.clientapp.id";
+
   public static final String 

[01/50] [abbrv] hadoop git commit: YARN-6687. Validate that the duration of the periodic reservation is less than the periodicity. (subru via curino) [Forced Update!]

2017-08-16 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3926 a5c977e48 -> f25bc5077 (forced update)


YARN-6687. Validate that the duration of the periodic reservation is less than 
the periodicity. (subru via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28d97b79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28d97b79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28d97b79

Branch: refs/heads/YARN-3926
Commit: 28d97b79b69bb2be02d9320105e155eeed6f9e78
Parents: cc59b5f
Author: Carlo Curino 
Authored: Fri Aug 11 16:58:04 2017 -0700
Committer: Carlo Curino 
Committed: Fri Aug 11 16:58:04 2017 -0700

--
 .../reservation/ReservationInputValidator.java  | 18 ++--
 .../TestReservationInputValidator.java  | 93 
 2 files changed, 106 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
index 0e9a825..027d066 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
@@ -129,11 +129,12 @@ public class ReservationInputValidator {
   Resources.multiply(rr.getCapability(), rr.getConcurrency()));
 }
 // verify the allocation is possible (skip for ANY)
-if (contract.getDeadline() - contract.getArrival() < minDuration
+long duration = contract.getDeadline() - contract.getArrival();
+if (duration < minDuration
 && type != ReservationRequestInterpreter.R_ANY) {
   message =
   "The time difference ("
-  + (contract.getDeadline() - contract.getArrival())
+  + (duration)
   + ") between arrival (" + contract.getArrival() + ") "
   + "and deadline (" + contract.getDeadline() + ") must "
   + " be greater or equal to the minimum resource duration ("
@@ -158,15 +159,22 @@ public class ReservationInputValidator {
 // check that the recurrence is a positive long value.
 String recurrenceExpression = contract.getRecurrenceExpression();
 try {
-  Long recurrence = Long.parseLong(recurrenceExpression);
+  long recurrence = Long.parseLong(recurrenceExpression);
   if (recurrence < 0) {
 message = "Negative Period : " + recurrenceExpression + ". Please try"
-+ " again with a non-negative long value as period";
++ " again with a non-negative long value as period.";
+throw RPCUtil.getRemoteException(message);
+  }
+  // verify duration is less than recurrence for periodic reservations
+  if (recurrence > 0 && duration > recurrence) {
+message = "Duration of the requested reservation: " + duration
++ " is greater than the recurrence: " + recurrence
++ ". Please try again with a smaller duration.";
 throw RPCUtil.getRemoteException(message);
   }
 } catch (NumberFormatException e) {
   message = "Invalid period " + recurrenceExpression + ". Please try"
-  + " again with a non-negative long value as period";
+  + " again with a non-negative long value as period.";
   throw RPCUtil.getRemoteException(message);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
 

[16/50] [abbrv] hadoop git commit: YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType update. (Kartheek Muthyala via asuresh)

2017-08-16 Thread sunilg
YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType 
update. (Kartheek Muthyala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d7be1d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d7be1d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d7be1d8

Branch: refs/heads/YARN-3926
Commit: 4d7be1d8575e9254c59d41460960708e3718503a
Parents: 0446511
Author: Arun Suresh 
Authored: Mon Aug 14 19:46:17 2017 -0700
Committer: Arun Suresh 
Committed: Mon Aug 14 19:46:17 2017 -0700

--
 .../yarn/client/api/impl/TestAMRMClient.java| 395 +--
 .../yarn/client/api/impl/TestNMClient.java  |   7 +-
 .../containermanager/ContainerManagerImpl.java  | 132 ---
 .../containermanager/container/Container.java   |   4 +-
 .../container/ContainerImpl.java|  37 +-
 .../monitor/ContainersMonitorImpl.java  |  15 -
 .../scheduler/ContainerScheduler.java   |  73 
 .../scheduler/ContainerSchedulerEventType.java  |   1 +
 .../UpdateContainerSchedulerEvent.java  |  85 
 .../nodemanager/TestNodeManagerResync.java  |  11 +-
 .../BaseContainerManagerTest.java   |  33 +-
 .../containermanager/TestContainerManager.java  | 267 -
 .../TestContainerManagerRecovery.java   |   2 +-
 .../TestContainerSchedulerQueuing.java  |  96 +
 .../nodemanager/webapp/MockContainer.java   |   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |   2 +-
 .../security/RMContainerTokenSecretManager.java |  30 +-
 17 files changed, 964 insertions(+), 228 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7be1d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 1b2bca3..09b12f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
@@ -36,6 +37,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -142,6 +144,10 @@ public class TestAMRMClient {
 // set the minimum allocation so that resource decrease can go under 1024
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
 conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
+conf.setBoolean(
+YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
+conf.setInt(
+YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10);
 yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), 
nodeCount, 1, 1);
 yarnCluster.init(conf);
 yarnCluster.start();
@@ -924,8 +930,8 @@ public class TestAMRMClient {
 // add exp=x to ANY
 client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024,
 1), null, null, Priority.UNDEFINED, true, "x"));
-Assert.assertEquals(1, client.ask.size());
-Assert.assertEquals("x", client.ask.iterator().next()
+assertEquals(1, client.ask.size());
+assertEquals("x", client.ask.iterator().next()
 .getNodeLabelExpression());
 
 // add exp=x then add exp=a to ANY in same priority, only exp=a should kept
@@ -933,8 +939,8 @@ public class TestAMRMClient {
 1), null, null, Priority.UNDEFINED, true, "x"));
 client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024,
 1), null, null, Priority.UNDEFINED, true, "a"));
-Assert.assertEquals(1, client.ask.size());
-Assert.assertEquals("a", client.ask.iterator().next()
+assertEquals(1, client.ask.size());
+assertEquals("a", client.ask.iterator().next()
 .getNodeLabelExpression());
 
 // add exp=x to ANY, rack and node, only resource request has ANY resource
@@ -943,10 +949,10 @@ public class TestAMRMClient {
 client.addContainerRequest(new 

[08/50] [abbrv] hadoop git commit: HDFS-12162. Update listStatus document to describe the behavior when the argument is a file. Contributed by Ajay Kumar.

2017-08-16 Thread sunilg
HDFS-12162. Update listStatus document to describe the behavior when the 
argument is a file. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d72124a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d72124a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d72124a4

Branch: refs/heads/YARN-3926
Commit: d72124a44268e21ada036242bfbccafc23c52ed0
Parents: 18f3603
Author: Anu Engineer 
Authored: Mon Aug 14 11:32:49 2017 -0700
Committer: Anu Engineer 
Committed: Mon Aug 14 11:32:49 2017 -0700

--
 .../hadoop/fs/http/server/FSOperations.java |  2 +-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 39 
 2 files changed, 40 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72124a4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index c008802..4b5918a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -669,7 +669,7 @@ public class FSOperations {
 /**
  * Creates a list-status executor.
  *
- * @param path the directory to retrieve the status of its contents.
+ * @param path the directory/file to retrieve the status of its contents.
  * @param filter glob filter to use.
  *
  * @throws IOException thrown if the filter expression is incorrect.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72124a4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 7544c80..03834eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -495,6 +495,45 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileSt
 
 See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
 
+### List a File
+
+* Submit a HTTP GET request.
+
+curl -i  "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTSTATUS"
+
+The client receives a response with a [`FileStatuses` JSON 
object](#FileStatuses_JSON_Schema):
+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Content-Length: 427
+
+{
+  "FileStatuses":
+  {
+"FileStatus":
+[
+  {
+"accessTime"  : 1320171722771,
+"blockSize"   : 33554432,
+"childrenNum" : 0,
+"fileId"  : 16390,
+"group"   : "supergroup",
+"length"  : 1366,
+"modificationTime": 1501770633062,
+"owner"   : "webuser",
+"pathSuffix"  : "",
+"permission"  : "644",
+"replication" : 1,
+"storagePolicy"   : 0,
+"type": "FILE"
+  }
+]
+  }
+}
+
+See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
+
+
 ### Iteratively List a Directory
 
 * Submit a HTTP GET request.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: HDFS-12066. When Namenode is in safemode, may not allowed to remove an user's erasure coding policy. Contributed by lufei.

2017-08-16 Thread sunilg
HDFS-12066. When Namenode is in safemode, may not allowed to remove a user's 
erasure coding policy. Contributed by lufei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3ae3e26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3ae3e26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3ae3e26

Branch: refs/heads/YARN-3926
Commit: e3ae3e26446c2e98b7aebc4ea66256cfdb4a397f
Parents: 1040bae
Author: Wei-Chiu Chuang 
Authored: Tue Aug 15 07:41:10 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 15 07:41:43 2017 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java  | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3ae3e26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index caf73f7..1cfaa54 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7113,6 +7113,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 boolean success = false;
 writeLock();
 try {
+  checkOperation(OperationCategory.WRITE);
+  checkNameNodeSafeMode("Cannot remove erasure coding policy "
+  + ecPolicyName);
   FSDirErasureCodingOp.removeErasureCodePolicy(this, ecPolicyName);
   success = true;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3ae3e26/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index bc95ec7..f25d28f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -482,6 +482,15 @@ public class TestSafeMode {
   // expected
 }
 
+try {
+  dfs.removeErasureCodingPolicy("testECName");
+  fail("RemoveErasureCodingPolicy should have failed.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(
+  "Cannot remove erasure coding policy", ioe);
+  // expected
+}
+
 assertFalse("Could not leave SM",
 dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[12/50] [abbrv] hadoop git commit: YARN-6917. Queue path is recomputed from scratch on every allocation. Contributed by Eric Payne

2017-08-16 Thread sunilg
YARN-6917. Queue path is recomputed from scratch on every allocation. 
Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55587928
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55587928
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55587928

Branch: refs/heads/YARN-3926
Commit: 5558792894169425bff054364a1ab4c48b347fb9
Parents: 3325ef6
Author: Jason Lowe 
Authored: Mon Aug 14 15:31:34 2017 -0500
Committer: Jason Lowe 
Committed: Mon Aug 14 15:31:34 2017 -0500

--
 .../resourcemanager/scheduler/capacity/AbstractCSQueue.java  | 8 
 .../server/resourcemanager/scheduler/capacity/LeafQueue.java | 5 -
 .../resourcemanager/scheduler/capacity/ParentQueue.java  | 6 --
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 5fbdead..d7c452a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -76,6 +76,7 @@ public abstract class AbstractCSQueue implements CSQueue {
   private static final Log LOG = LogFactory.getLog(AbstractCSQueue.class);  
   volatile CSQueue parent;
   final String queueName;
+  private final String queuePath;
   volatile int numContainers;
   
   final Resource minimumAllocation;
@@ -119,6 +120,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 this.labelManager = cs.getRMContext().getNodeLabelManager();
 this.parent = parent;
 this.queueName = queueName;
+this.queuePath =
+  ((parent == null) ? "" : (parent.getQueuePath() + ".")) + this.queueName;
 this.resourceCalculator = cs.getResourceCalculator();
 this.activitiesManager = cs.getActivitiesManager();
 
@@ -150,6 +153,11 @@ public abstract class AbstractCSQueue implements CSQueue {
 queueCapacities,
 parent == null ? null : parent.getQueueCapacities());
   }
+
+  @Override
+  public String getQueuePath() {
+return queuePath;
+  }
   
   @Override
   public float getCapacity() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 2e502b7..d15431e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -299,11 +299,6 @@ public class LeafQueue extends AbstractCSQueue {
 }
   }
 
-  @Override
-  public String getQueuePath() {
-return getParent().getQueuePath() + "." + getQueueName();
-  }
-
   /**
* Used only by tests.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 

[10/50] [abbrv] hadoop git commit: YARN-6881. LOG is unused in AllocationConfiguration (Contributed by weiyuan via Daniel Templeton)

2017-08-16 Thread sunilg
YARN-6881. LOG is unused in AllocationConfiguration (Contributed by weiyuan via 
Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b09c327
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b09c327
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b09c327

Branch: refs/heads/YARN-3926
Commit: 6b09c327057947049ef7984afbb5ed225f15fc2d
Parents: 608a06c
Author: Daniel Templeton 
Authored: Mon Aug 14 11:55:33 2017 -0700
Committer: Daniel Templeton 
Committed: Mon Aug 14 11:55:33 2017 -0700

--
 .../resourcemanager/scheduler/fair/AllocationConfiguration.java   | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b09c327/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index f143aa6..71e6f7f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -23,8 +23,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -41,7 +39,6 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 
 public class AllocationConfiguration extends ReservationSchedulerConfiguration 
{
-  private static final Log LOG = LogFactory.getLog(FSQueue.class.getName());
   private static final AccessControlList EVERYBODY_ACL = new 
AccessControlList("*");
   private static final AccessControlList NOBODY_ACL = new AccessControlList(" 
");
   private static final ResourceCalculator RESOURCE_CALCULATOR =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by Robert Kanter

2017-08-16 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.2 155646c48 -> 864a0964f


YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by 
Robert Kanter

(cherry picked from commit 14553061be0a341df3e628dcaf06717b4630b05e)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/864a0964
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/864a0964
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/864a0964

Branch: refs/heads/branch-2.8.2
Commit: 864a0964f0c1ea10a242c94adc857169e4edf17f
Parents: 155646c
Author: Jason Lowe 
Authored: Wed Aug 16 13:04:36 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 13:17:02 2017 -0500

--
 .../apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/864a0964/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index e6237b1..1c7aa8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -176,13 +176,13 @@ public class TestAMRMProxy {
YarnClient rmClient = YarnClient.createYarnClient()) {
   Configuration conf = new YarnConfiguration();
   conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500);
+  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 4500);
   // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least
   // RM_AM_EXPIRY_INTERVAL_MS * 1.5 *3
   conf.setInt(
-  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6);
+  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 
20);
   cluster.init(conf);
   cluster.start();
   final Configuration yarnConf = cluster.getConfig();
@@ -221,7 +221,7 @@ public class TestAMRMProxy {
 lastToken = response.getAMRMToken();
 
 // Time slot to be sure the RM renew the token
-Thread.sleep(1500);
+Thread.sleep(4500);
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by Robert Kanter

2017-08-16 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 2b92c1be0 -> 2810e6ab9


YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by 
Robert Kanter

(cherry picked from commit 14553061be0a341df3e628dcaf06717b4630b05e)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2810e6ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2810e6ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2810e6ab

Branch: refs/heads/branch-2.8
Commit: 2810e6ab9c118d6c5623711df978a0422cd5c942
Parents: 2b92c1b
Author: Jason Lowe 
Authored: Wed Aug 16 13:04:36 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 13:14:25 2017 -0500

--
 .../apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2810e6ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index e6237b1..1c7aa8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -176,13 +176,13 @@ public class TestAMRMProxy {
YarnClient rmClient = YarnClient.createYarnClient()) {
   Configuration conf = new YarnConfiguration();
   conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500);
+  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 4500);
   // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least
   // RM_AM_EXPIRY_INTERVAL_MS * 1.5 *3
   conf.setInt(
-  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6);
+  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 
20);
   cluster.init(conf);
   cluster.start();
   final Configuration yarnConf = cluster.getConfig();
@@ -221,7 +221,7 @@ public class TestAMRMProxy {
 lastToken = response.getAMRMToken();
 
 // Time slot to be sure the RM renew the token
-Thread.sleep(1500);
+Thread.sleep(4500);
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by Robert Kanter

2017-08-16 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d5680c08e -> cf30380d4


YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by 
Robert Kanter

(cherry picked from commit 14553061be0a341df3e628dcaf06717b4630b05e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf30380d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf30380d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf30380d

Branch: refs/heads/branch-2
Commit: cf30380d4620bdd96dbe8284298b9b4b3c0192be
Parents: d5680c0
Author: Jason Lowe 
Authored: Wed Aug 16 13:04:36 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 13:06:13 2017 -0500

--
 .../apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf30380d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index 14df94a..6a063e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -151,13 +151,13 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
YarnClient rmClient = YarnClient.createYarnClient()) {
   Configuration conf = new YarnConfiguration();
   conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500);
+  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 4500);
   // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least
   // RM_AM_EXPIRY_INTERVAL_MS * 1.5 *3
   conf.setInt(
-  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6);
+  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 
20);
   cluster.init(conf);
   cluster.start();
   final Configuration yarnConf = cluster.getConfig();
@@ -198,7 +198,7 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
 lastToken = response.getAMRMToken();
 
 // Time slot to be sure the AMRMProxy renew the token
-Thread.sleep(1500);
+Thread.sleep(4500);
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by Robert Kanter

2017-08-16 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 588c190af -> 14553061b


YARN-7020. TestAMRMProxy#testAMRMProxyTokenRenewal is flakey. Contributed by 
Robert Kanter


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14553061
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14553061
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14553061

Branch: refs/heads/trunk
Commit: 14553061be0a341df3e628dcaf06717b4630b05e
Parents: 588c190
Author: Jason Lowe 
Authored: Wed Aug 16 13:04:36 2017 -0500
Committer: Jason Lowe 
Committed: Wed Aug 16 13:04:36 2017 -0500

--
 .../apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14553061/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
index 14df94a..6a063e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMProxy.java
@@ -151,13 +151,13 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
YarnClient rmClient = YarnClient.createYarnClient()) {
   Configuration conf = new YarnConfiguration();
   conf.setBoolean(YarnConfiguration.AMRM_PROXY_ENABLED, true);
-  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1500);
-  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 1500);
+  conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 4500);
+  conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 4500);
   // RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS should be at least
   // RM_AM_EXPIRY_INTERVAL_MS * 1.5 *3
   conf.setInt(
-  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 6);
+  YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, 
20);
   cluster.init(conf);
   cluster.start();
   final Configuration yarnConf = cluster.getConfig();
@@ -198,7 +198,7 @@ public class TestAMRMProxy extends BaseAMRMProxyE2ETest {
 lastToken = response.getAMRMToken();
 
 // Time slot to be sure the AMRMProxy renew the token
-Thread.sleep(1500);
+Thread.sleep(4500);
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-7006. [ATSv2 Security] Changes for authentication for CollectorNodemanagerProtocol. Contributed by Varun Saxena

2017-08-16 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 ee5d80d3b -> 315ff9bdc


YARN-7006. [ATSv2 Security] Changes for authentication for 
CollectorNodemanagerProtocol. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/315ff9bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/315ff9bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/315ff9bd

Branch: refs/heads/YARN-5355
Commit: 315ff9bdc62b1e5300baae91758d0874f027af3e
Parents: ee5d80d
Author: Jian He 
Authored: Wed Aug 16 11:01:06 2017 -0700
Committer: Jian He 
Committed: Wed Aug 16 11:01:06 2017 -0700

--
 .../collectormanager/NMCollectorService.java|  7 +-
 .../containermanager/AuxServices.java   |  3 +-
 .../timelineservice/NMTimelinePublisher.java| 29 ++--
 .../CollectorNodemanagerSecurityInfo.java   | 69 
 .../org.apache.hadoop.security.SecurityInfo | 14 
 5 files changed, 112 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/315ff9bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
index 7fdca78..862cd1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
@@ -72,13 +72,13 @@ public class NMCollectorService extends CompositeService 
implements
 
 Configuration serverConf = new Configuration(conf);
 
-// TODO Security settings.
 YarnRPC rpc = YarnRPC.create(conf);
 
+// Kerberos based authentication to be used for CollectorNodemanager
+// protocol if security is enabled.
 server =
 rpc.getServer(CollectorNodemanagerProtocol.class, this,
-collectorServerAddress, serverConf,
-this.context.getNMTokenSecretManager(),
+collectorServerAddress, serverConf, null,
 conf.getInt(YarnConfiguration.NM_COLLECTOR_SERVICE_THREAD_COUNT,
 YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_THREAD_COUNT));
 
@@ -93,7 +93,6 @@ public class NMCollectorService extends CompositeService 
implements
 LOG.info("NMCollectorService started at " + collectorServerAddress);
   }
 
-
   @Override
   public void serviceStop() throws Exception {
 if (server != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/315ff9bd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index c0e1f5a..9b17f18 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -243,7 +243,8 @@ public class AuxServices extends AbstractService
 for (AuxiliaryService serv : serviceMap.values()) {
   try {
 serv.initializeContainer(new ContainerInitializationContext(
-event.getUser(), event.getContainer().getContainerId(),
+event.getContainer().getUser(),
+event.getContainer().getContainerId(),
 event.getContainer().getResource(), event.getContainer()
 .getContainerTokenIdentifier().getContainerType()));
   } catch (Throwable th) {


[42/50] [abbrv] hadoop git commit: HDFS-12066. When Namenode is in safemode, may not allowed to remove an user's erasure coding policy. Contributed by lufei.

2017-08-16 Thread haibochen
HDFS-12066. When Namenode is in safemode, may not allowed to remove a user's 
erasure coding policy. Contributed by lufei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3ae3e26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3ae3e26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3ae3e26

Branch: refs/heads/YARN-1011
Commit: e3ae3e26446c2e98b7aebc4ea66256cfdb4a397f
Parents: 1040bae
Author: Wei-Chiu Chuang 
Authored: Tue Aug 15 07:41:10 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 15 07:41:43 2017 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java  | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3ae3e26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index caf73f7..1cfaa54 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7113,6 +7113,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 boolean success = false;
 writeLock();
 try {
+  checkOperation(OperationCategory.WRITE);
+  checkNameNodeSafeMode("Cannot remove erasure coding policy "
+  + ecPolicyName);
   FSDirErasureCodingOp.removeErasureCodePolicy(this, ecPolicyName);
   success = true;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3ae3e26/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index bc95ec7..f25d28f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -482,6 +482,15 @@ public class TestSafeMode {
   // expected
 }
 
+try {
+  dfs.removeErasureCodingPolicy("testECName");
+  fail("RemoveErasureCodingPolicy should have failed.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(
+  "Cannot remove erasure coding policy", ioe);
+  // expected
+}
+
 assertFalse("Could not leave SM",
 dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[35/50] [abbrv] hadoop git commit: HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure durations. Contributed by Hanisha Koneru.

2017-08-16 Thread haibochen
HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure 
durations. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bef4eca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bef4eca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bef4eca

Branch: refs/heads/YARN-1011
Commit: 8bef4eca28a3466707cc4ea0de0330449319a5eb
Parents: 5558792
Author: Arpit Agarwal 
Authored: Mon Aug 14 15:53:35 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 14 15:53:35 2017 -0700

--
 .../java/org/apache/hadoop/ipc/ProtobufRpcEngine.java | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bef4eca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 639bbad..2c0cfe5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -190,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = Time.now();
+startTime = Time.monotonicNow();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -245,7 +245,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = Time.now() - startTime;
+long callTime = Time.monotonicNow() - startTime;
 LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
   }
   
@@ -373,19 +373,19 @@ public class ProtobufRpcEngine implements RpcEngine {
 this.server = currentCallInfo.get().server;
 this.call = Server.getCurCall().get();
 this.methodName = currentCallInfo.get().methodName;
-this.setupTime = Time.now();
+this.setupTime = Time.monotonicNow();
   }
 
   @Override
   public void setResponse(Message message) {
-long processingTime = Time.now() - setupTime;
+long processingTime = Time.monotonicNow() - setupTime;
 call.setDeferredResponse(RpcWritable.wrap(message));
 server.updateDeferredMetrics(methodName, processingTime);
   }
 
   @Override
   public void error(Throwable t) {
-long processingTime = Time.now() - setupTime;
+long processingTime = Time.monotonicNow() - setupTime;
 String detailedMetricsName = t.getClass().getSimpleName();
 server.updateDeferredMetrics(detailedMetricsName, processingTime);
 call.setDeferredError(t);
@@ -513,7 +513,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 Message param = request.getValue(prototype);
 
 Message result;
-long startTime = Time.now();
+long startTime = Time.monotonicNow();
 int qTime = (int) (startTime - receiveTime);
 Exception exception = null;
 boolean isDeferred = false;
@@ -537,7 +537,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   throw e;
 } finally {
   currentCallInfo.set(null);
-  int processingTime = (int) (Time.now() - startTime);
+  int processingTime = (int) (Time.monotonicNow() - startTime);
   if (LOG.isDebugEnabled()) {
 String msg =
 "Served: " + methodName + (isDeferred ? ", deferred" : "") +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[28/50] [abbrv] hadoop git commit: YARN-6959. RM may allocate wrong AM Container for new attempt. Contributed by Yuqi Wang

2017-08-16 Thread haibochen
YARN-6959. RM may allocate wrong AM Container for new attempt. Contributed by 
Yuqi Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2f6299f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2f6299f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2f6299f

Branch: refs/heads/YARN-1011
Commit: e2f6299f6f580d7a03f2377d19ac85f55fd4e73b
Parents: ce797a1
Author: Jian He 
Authored: Mon Aug 14 10:51:04 2017 -0700
Committer: Jian He 
Committed: Mon Aug 14 10:51:30 2017 -0700

--
 .../scheduler/AbstractYarnScheduler.java|  1 +
 .../scheduler/capacity/CapacityScheduler.java   | 13 ++
 .../scheduler/fair/FairScheduler.java   | 15 ++-
 .../scheduler/fifo/FifoScheduler.java   | 15 ++-
 .../scheduler/fair/TestFairScheduler.java   | 46 ++--
 5 files changed, 63 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index d506f4d..79caab0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -323,6 +323,7 @@ public abstract class AbstractYarnScheduler
 
   }
 
+  // TODO: Rename it to getCurrentApplicationAttempt
   public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) {
 SchedulerApplication app = applications.get(
 applicationAttemptId.getApplicationId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 3286982..e4ca003 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -903,6 +903,19 @@ public class CapacityScheduler extends
   ContainerUpdates updateRequests) {
 FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
 if (application == null) {
+  LOG.error("Calling allocate on removed or non existent application " +
+  applicationAttemptId.getApplicationId());
+  return EMPTY_ALLOCATION;
+}
+
+// The allocate may be the leftover from previous attempt, and it will
+// impact current attempt, such as confuse the request and allocation for
+// current attempt's AM container.
+// Note that the outside precondition check for the attempt id may be
+// outdated here, so double-checking it here is necessary.
+if (!application.getApplicationAttemptId().equals(applicationAttemptId)) {
+  LOG.error("Calling allocate on previous or removed " +
+  "or non existent application attempt " + applicationAttemptId);
   return EMPTY_ALLOCATION;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 

[30/50] [abbrv] hadoop git commit: HDFS-12162. Update listStatus document to describe the behavior when the argument is a file. Contributed by Ajay Kumar.

2017-08-16 Thread haibochen
HDFS-12162. Update listStatus document to describe the behavior when the 
argument is a file. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d72124a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d72124a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d72124a4

Branch: refs/heads/YARN-1011
Commit: d72124a44268e21ada036242bfbccafc23c52ed0
Parents: 18f3603
Author: Anu Engineer 
Authored: Mon Aug 14 11:32:49 2017 -0700
Committer: Anu Engineer 
Committed: Mon Aug 14 11:32:49 2017 -0700

--
 .../hadoop/fs/http/server/FSOperations.java |  2 +-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 39 
 2 files changed, 40 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72124a4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index c008802..4b5918a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -669,7 +669,7 @@ public class FSOperations {
 /**
  * Creates a list-status executor.
  *
- * @param path the directory to retrieve the status of its contents.
+ * @param path the directory/file to retrieve the status of its contents.
  * @param filter glob filter to use.
  *
  * @throws IOException thrown if the filter expression is incorrect.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72124a4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 7544c80..03834eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -495,6 +495,45 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileSt
 
 See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
 
+### List a File
+
+* Submit a HTTP GET request.
+
+curl -i  "http://:/webhdfs/v1/?op=LISTSTATUS"
+
+The client receives a response with a [`FileStatuses` JSON 
object](#FileStatuses_JSON_Schema):
+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Content-Length: 427
+
+{
+  "FileStatuses":
+  {
+"FileStatus":
+[
+  {
+"accessTime"  : 1320171722771,
+"blockSize"   : 33554432,
+"childrenNum" : 0,
+"fileId"  : 16390,
+"group"   : "supergroup",
+"length"  : 1366,
+"modificationTime": 1501770633062,
+"owner"   : "webuser",
+"pathSuffix"  : "",
+"permission"  : "644",
+"replication" : 1,
+"storagePolicy"   : 0,
+"type": "FILE"
+  }
+]
+  }
+}
+
+See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
+
+
 ### Iteratively List a Directory
 
 * Submit a HTTP GET request.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: YARN-6705 Add separate NM preemption thresholds for cpu and memory (Haibo Chen)

2017-08-16 Thread haibochen
YARN-6705 Add separate NM preemption thresholds for cpu and memory  (Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6e03a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6e03a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6e03a59

Branch: refs/heads/YARN-1011
Commit: f6e03a59b0ad4ab0ac5e6b520884b7c7e8019986
Parents: 5baae1b
Author: Haibo Chen 
Authored: Wed Jul 12 12:32:13 2017 -0700
Committer: Haibo Chen 
Committed: Wed Aug 16 10:02:43 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 31 +--
 .../src/main/resources/yarn-default.xml | 34 ++--
 .../monitor/ContainersMonitorImpl.java  | 42 +---
 3 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e03a59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a54bd11..6fb75de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1643,10 +1643,33 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
   NM_PREFIX + "overallocation.memory-utilization-threshold";
 
-  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
-  NM_PREFIX + "overallocation.preemption-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0.96f;
+  /**
+   * The CPU utilization threshold which, if exceeded a few times in a row,
+   * OPPORTUNISTIC containers started due to overallocation should start
+   * getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.cpu";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD = 0.99f;
+
+  /**
+   * The number of times that CPU utilization must go over the CPU preemption
+   * threshold consecutively before preemption starts to kick in.
+   */
+  public static final String NM_OVERALLOCATION_PREEMPTION_CPU_COUNT =
+  NM_PREFIX + "overallocation.preemption-threshold-count.cpu";
+  public static final int DEFAULT_NM_OVERALLOCATION_PREEMPTION_CPU_COUNT = 4;
+
+
+  /**
+   * The memory utilization threshold beyond which OPPORTUNISTIC containers
+   * started due to overallocation should start getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.memory";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD = 0.95f;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e03a59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d76e13e..9b9b816 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1588,11 +1588,37 @@
 
   
 When a node is over-allocated to improve utilization by
-  running OPPORTUNISTIC containers, this config captures the utilization
-  beyond which OPPORTUNISTIC containers should start getting preempted.
+  running OPPORTUNISTIC containers, this config captures the CPU
+  utilization beyond which OPPORTUNISTIC containers should start getting
+  preempted. This is used in combination with
+  yarn.nodemanager.overallocation.preemption-threshold-count.cpu, that is,
+  only when the CPU utilization goes over this threshold consecutively for
  a few times will preemption kick in.
 
-yarn.nodemanager.overallocation.preemption-threshold
-0.96
+yarn.nodemanager.overallocation.preemption-threshold.cpu
+0.99
+  
+
+  
+When a node is over-allocated to 

[34/50] [abbrv] hadoop git commit: YARN-6917. Queue path is recomputed from scratch on every allocation. Contributed by Eric Payne

2017-08-16 Thread haibochen
YARN-6917. Queue path is recomputed from scratch on every allocation. 
Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55587928
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55587928
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55587928

Branch: refs/heads/YARN-1011
Commit: 5558792894169425bff054364a1ab4c48b347fb9
Parents: 3325ef6
Author: Jason Lowe 
Authored: Mon Aug 14 15:31:34 2017 -0500
Committer: Jason Lowe 
Committed: Mon Aug 14 15:31:34 2017 -0500

--
 .../resourcemanager/scheduler/capacity/AbstractCSQueue.java  | 8 
 .../server/resourcemanager/scheduler/capacity/LeafQueue.java | 5 -
 .../resourcemanager/scheduler/capacity/ParentQueue.java  | 6 --
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 5fbdead..d7c452a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -76,6 +76,7 @@ public abstract class AbstractCSQueue implements CSQueue {
   private static final Log LOG = LogFactory.getLog(AbstractCSQueue.class);  
   volatile CSQueue parent;
   final String queueName;
+  private final String queuePath;
   volatile int numContainers;
   
   final Resource minimumAllocation;
@@ -119,6 +120,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 this.labelManager = cs.getRMContext().getNodeLabelManager();
 this.parent = parent;
 this.queueName = queueName;
+this.queuePath =
+  ((parent == null) ? "" : (parent.getQueuePath() + ".")) + this.queueName;
 this.resourceCalculator = cs.getResourceCalculator();
 this.activitiesManager = cs.getActivitiesManager();
 
@@ -150,6 +153,11 @@ public abstract class AbstractCSQueue implements CSQueue {
 queueCapacities,
 parent == null ? null : parent.getQueueCapacities());
   }
+
+  @Override
+  public String getQueuePath() {
+return queuePath;
+  }
   
   @Override
   public float getCapacity() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 2e502b7..d15431e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -299,11 +299,6 @@ public class LeafQueue extends AbstractCSQueue {
 }
   }
 
-  @Override
-  public String getQueuePath() {
-return getParent().getQueuePath() + "." + getQueueName();
-  }
-
   /**
* Used only by tests.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 

[23/50] [abbrv] hadoop git commit: YARN-6687. Validate that the duration of the periodic reservation is less than the periodicity. (subru via curino)

2017-08-16 Thread haibochen
YARN-6687. Validate that the duration of the periodic reservation is less than 
the periodicity. (subru via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28d97b79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28d97b79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28d97b79

Branch: refs/heads/YARN-1011
Commit: 28d97b79b69bb2be02d9320105e155eeed6f9e78
Parents: cc59b5f
Author: Carlo Curino 
Authored: Fri Aug 11 16:58:04 2017 -0700
Committer: Carlo Curino 
Committed: Fri Aug 11 16:58:04 2017 -0700

--
 .../reservation/ReservationInputValidator.java  | 18 ++--
 .../TestReservationInputValidator.java  | 93 
 2 files changed, 106 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
index 0e9a825..027d066 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
@@ -129,11 +129,12 @@ public class ReservationInputValidator {
   Resources.multiply(rr.getCapability(), rr.getConcurrency()));
 }
 // verify the allocation is possible (skip for ANY)
-if (contract.getDeadline() - contract.getArrival() < minDuration
+long duration = contract.getDeadline() - contract.getArrival();
+if (duration < minDuration
 && type != ReservationRequestInterpreter.R_ANY) {
   message =
   "The time difference ("
-  + (contract.getDeadline() - contract.getArrival())
+  + (duration)
   + ") between arrival (" + contract.getArrival() + ") "
   + "and deadline (" + contract.getDeadline() + ") must "
   + " be greater or equal to the minimum resource duration ("
@@ -158,15 +159,22 @@ public class ReservationInputValidator {
 // check that the recurrence is a positive long value.
 String recurrenceExpression = contract.getRecurrenceExpression();
 try {
-  Long recurrence = Long.parseLong(recurrenceExpression);
+  long recurrence = Long.parseLong(recurrenceExpression);
   if (recurrence < 0) {
 message = "Negative Period : " + recurrenceExpression + ". Please try"
-+ " again with a non-negative long value as period";
++ " again with a non-negative long value as period.";
+throw RPCUtil.getRemoteException(message);
+  }
+  // verify duration is less than recurrence for periodic reservations
+  if (recurrence > 0 && duration > recurrence) {
+message = "Duration of the requested reservation: " + duration
++ " is greater than the recurrence: " + recurrence
++ ". Please try again with a smaller duration.";
 throw RPCUtil.getRemoteException(message);
   }
 } catch (NumberFormatException e) {
   message = "Invalid period " + recurrenceExpression + ". Please try"
-  + " again with a non-negative long value as period";
+  + " again with a non-negative long value as period.";
   throw RPCUtil.getRemoteException(message);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
index 2917cd9..90a681d 100644

[37/50] [abbrv] hadoop git commit: YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType update. (Kartheek Muthyala via asuresh)

2017-08-16 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7be1d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index aeba399..a1c247b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -27,6 +27,8 @@ import java.util.List;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
@@ -37,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ConfigurationException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -951,4 +954,97 @@ public class TestContainerSchedulerQueuing extends 
BaseContainerManagerTest {
 map.get(org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED)
 .getContainerId());
   }
+
+  /**
+   * Starts one OPPORTUNISTIC container that takes up the whole node's
+   * resources, and submits one more that will be queued. Then promotes the
+   * queued OPPORTUNISTIC container, which should kill the currently running
+   * OPPORTUNISTIC container to make room for the promoted request.
+   * @throws Exception
+   */
+  @Test
+  public void testPromotionOfOpportunisticContainers() throws Exception {
+containerManager.start();
+
+ContainerLaunchContext containerLaunchContext =
+recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+List list = new ArrayList<>();
+list.add(StartContainerRequest.newInstance(
+containerLaunchContext,
+createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
+context.getNodeId(),
+user, BuilderUtils.newResource(2048, 1),
+context.getContainerTokenSecretManager(), null,
+ExecutionType.OPPORTUNISTIC)));
+list.add(StartContainerRequest.newInstance(
+containerLaunchContext,
+createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
+context.getNodeId(),
+user, BuilderUtils.newResource(1024, 1),
+context.getContainerTokenSecretManager(), null,
+ExecutionType.OPPORTUNISTIC)));
+
+StartContainersRequest allRequests =
+StartContainersRequest.newInstance(list);
+containerManager.startContainers(allRequests);
+
+Thread.sleep(5000);
+
+// Ensure first container is running and others are queued.
+List statList = new ArrayList();
+for (int i = 0; i < 3; i++) {
+  statList.add(createContainerId(i));
+}
+GetContainerStatusesRequest statRequest = GetContainerStatusesRequest
+.newInstance(Arrays.asList(createContainerId(0)));
+List containerStatuses = containerManager
+.getContainerStatuses(statRequest).getContainerStatuses();
+for (ContainerStatus status : containerStatuses) {
+  if (status.getContainerId().equals(createContainerId(0))) {
+Assert.assertEquals(
+org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
+status.getState());
+  } else {
+Assert.assertEquals(
+org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED,
+status.getState());
+  }
+}
+
+ContainerScheduler containerScheduler =
+containerManager.getContainerScheduler();
+// Ensure two containers are properly queued.
+

  1   2   >