hadoop git commit: MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergely Novák.

2017-09-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 98612bb2d -> 0af3aec50


MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. 
Contributed by Gergely Novák.

(cherry picked from commit 53be075241f1ba92bfe47e89c2dfc3f0664e2578)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0af3aec5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0af3aec5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0af3aec5

Branch: refs/heads/branch-3.0
Commit: 0af3aec50256c2a5f4d543f046772f368bf1a917
Parents: 98612bb
Author: Akira Ajisaka 
Authored: Fri Sep 22 13:27:59 2017 +0900
Committer: Akira Ajisaka 
Committed: Fri Sep 22 13:30:22 2017 +0900

--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml| 4 ++++
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 ---
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 ---
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 +++
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 ---
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 ---
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 10 files changed, 35 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0af3aec5/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index 13dc340..4aadcd6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -112,6 +112,10 @@
     <artifactId>guava</artifactId>
     <scope>provided</scope>
   </dependency>
+  <dependency>
+    <groupId>org.slf4j</groupId>
+    <artifactId>slf4j-api</artifactId>
+  </dependency>
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0af3aec5/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index b9987a5..2bda89d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -52,6 +50,8 @@ import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -84,7 +84,8 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0af3aec5/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 8dec39d..7b73820 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 

hadoop git commit: MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergely Novák.

2017-09-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7cd9018b1 -> 9d4de9c55


MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. 
Contributed by Gergely Novák.

(cherry picked from commit 53be075241f1ba92bfe47e89c2dfc3f0664e2578)

Conflicts:

hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraOutputFormat.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d4de9c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d4de9c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d4de9c5

Branch: refs/heads/branch-2
Commit: 9d4de9c556215e64c0810641fcab1bbf657500f9
Parents: 7cd9018
Author: Akira Ajisaka 
Authored: Fri Sep 22 13:27:59 2017 +0900
Committer: Akira Ajisaka 
Committed: Fri Sep 22 13:29:17 2017 +0900

--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml| 4 ++++
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 ---
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 ---
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 +++
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 ---
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 9 files changed, 31 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d4de9c5/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index f8d3d8d..215f2f3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -114,6 +114,10 @@
     <artifactId>guava</artifactId>
     <scope>provided</scope>
   </dependency>
+  <dependency>
+    <groupId>org.slf4j</groupId>
+    <artifactId>slf4j-api</artifactId>
+  </dependency>
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d4de9c5/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index b9987a5..2bda89d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -52,6 +50,8 @@ import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -84,7 +84,8 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d4de9c5/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 1ec8739..54a5ba4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 

hadoop git commit: MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergely Novák.

2017-09-21 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3baae4322 -> 53be07524


MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. 
Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53be0752
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53be0752
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53be0752

Branch: refs/heads/trunk
Commit: 53be075241f1ba92bfe47e89c2dfc3f0664e2578
Parents: 3baae43
Author: Akira Ajisaka 
Authored: Fri Sep 22 13:27:59 2017 +0900
Committer: Akira Ajisaka 
Committed: Fri Sep 22 13:27:59 2017 +0900

--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml| 4 ++++
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 ---
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 ---
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 +++
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 ---
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 ---
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 10 files changed, 35 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53be0752/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index 4a50ed3..5e5dd8f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -112,6 +112,10 @@
     <artifactId>guava</artifactId>
     <scope>provided</scope>
   </dependency>
+  <dependency>
+    <groupId>org.slf4j</groupId>
+    <artifactId>slf4j-api</artifactId>
+  </dependency>
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53be0752/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index b9987a5..2bda89d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -52,6 +50,8 @@ import 
org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -84,7 +84,8 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53be0752/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 8dec39d..7b73820 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,8 +29,6 @@ import java.sql.SQLException;
 import 

hadoop git commit: HDFS-12507. StripedBlockUtil.java:694: warning - Tag @link: reference not found: StripingCell. Contributed by Mukul Kumar Singh

2017-09-21 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk bfd1a72ba -> 3baae4322


HDFS-12507. StripedBlockUtil.java:694: warning - Tag @link: reference not 
found: StripingCell.  Contributed by Mukul Kumar Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3baae432
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3baae432
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3baae432

Branch: refs/heads/trunk
Commit: 3baae4322e3f2035c0a99eb9e4306567883581d1
Parents: bfd1a72
Author: Tsz-Wo Nicholas Sze 
Authored: Fri Sep 22 10:37:04 2017 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Fri Sep 22 10:37:04 2017 +0800

--
 .../main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3baae432/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 896ebc6..9e24576 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -575,7 +575,7 @@ public class StripedBlockUtil {
* TODO: consider parity cells
*/
   @VisibleForTesting
-  static class StripingCell {
+  public static class StripingCell {
 final ErasureCodingPolicy ecPolicy;
 /** Logical order in a block group, used when doing I/O to a block group. 
*/
 final int idxInBlkGroup;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/50] hadoop git commit: Revert "MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe"

2017-09-21 Thread asuresh
Revert "MAPREDUCE-6958. Shuffle audit logger should log size of shuffle 
transfer. Contributed by Jason Lowe"

This reverts commit b3d61304f2fa4a99526f7a60ccaac9f262083079.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea845ba5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea845ba5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea845ba5

Branch: refs/heads/YARN-6592
Commit: ea845ba58c585647c4be8d30d9b814f098e34a12
Parents: aa6e8d2
Author: Jason Lowe 
Authored: Tue Sep 19 08:45:05 2017 -0500
Committer: Jason Lowe 
Committed: Tue Sep 19 08:45:05 2017 -0500

--
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++---
 1 file changed, 7 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea845ba5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 06a3e42..863da7e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,6 +992,13 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
 
+  // this audit log is disabled by default,
+  // to turn it on please enable this audit log
+  // on log4j.properties by uncommenting the setting
+  if (AUDITLOG.isDebugEnabled()) {
+AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
+ " reducer " + reduceQ.get(0));
+  }
   int reduceId;
   String jobId;
   try {
@@ -1176,17 +1183,6 @@ public class ShuffleHandler extends AuxiliaryService {
 
   // Now set the response headers.
   setResponseHeaders(response, keepAliveParam, contentLength);
-
-  // this audit log is disabled by default,
-  // to turn it on please enable this audit log
-  // on log4j.properties by uncommenting the setting
-  if (AUDITLOG.isDebugEnabled()) {
-StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
-sb.append(" mappers: ").append(mapIds);
-sb.append(" reducer ").append(reduce);
-sb.append(" length ").append(contentLength);
-AUDITLOG.debug(sb.toString());
-  }
 }
 
 protected void setResponseHeaders(HttpResponse response,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/50] hadoop git commit: YARN-7192. Add a pluggable StateMachine Listener that is notified of NM Container State changes. Contributed by Arun Suresh

2017-09-21 Thread asuresh
YARN-7192. Add a pluggable StateMachine Listener that is notified of NM 
Container State changes. Contributed by Arun Suresh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4f9c7c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4f9c7c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4f9c7c9

Branch: refs/heads/YARN-6592
Commit: a4f9c7c9247801dd37beec6fc195622af1b884ad
Parents: 0f9af24
Author: Jason Lowe 
Authored: Mon Sep 18 10:16:09 2017 -0500
Committer: Jason Lowe 
Committed: Mon Sep 18 10:16:09 2017 -0500

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 +-
 .../state/MultiStateTransitionListener.java | 61 ++
 .../hadoop/yarn/state/StateMachineFactory.java  | 40 
 .../yarn/state/StateTransitionListener.java | 50 ++
 .../src/main/resources/yarn-default.xml |  6 ++
 .../ContainerStateTransitionListener.java   | 48 ++
 .../hadoop/yarn/server/nodemanager/Context.java |  2 +
 .../yarn/server/nodemanager/NodeManager.java| 48 +-
 .../container/ContainerImpl.java|  3 +-
 .../server/nodemanager/TestNodeManager.java | 68 
 .../amrmproxy/BaseAMRMProxyTest.java|  8 +++
 .../container/TestContainer.java| 53 +++
 12 files changed, 389 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 48910b3..114453f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -968,9 +968,13 @@ public class YarnConfiguration extends Configuration {
 NM_PREFIX + "bind-host";
 
   /** who will execute(launch) the containers.*/
-  public static final String NM_CONTAINER_EXECUTOR = 
+  public static final String NM_CONTAINER_EXECUTOR =
 NM_PREFIX + "container-executor.class";
 
+  /** List of container state transition listeners.*/
+  public static final String NM_CONTAINER_STATE_TRANSITION_LISTENERS =
+  NM_PREFIX + "container-state-transition-listener.classes";
+
   /**  
* Adjustment to make to the container os scheduling priority.
* The valid values for this could vary depending on the platform.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f9c7c9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
new file mode 100644
index 000..1a28fc5
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/MultiStateTransitionListener.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.state;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A {@link StateTransitionListener} that dispatches the pre and post
+ * state transitions to multiple registered listeners.
+ * NOTE: The registered listeners are called in a for loop. Clients should
+ *   know that a listener configured earlier might prevent a later listener
+ *   

[29/50] hadoop git commit: HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly cannot finish in 60s. (SammiChen via lei)

2017-09-21 Thread asuresh
HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly 
cannot finish in 60s. (SammiChen via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bbeacb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bbeacb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bbeacb7

Branch: refs/heads/YARN-6592
Commit: 7bbeacb75e93261dbda0e8efcde510e5fcf83efb
Parents: fda1221
Author: Lei Xu 
Authored: Tue Sep 19 11:50:01 2017 -0700
Committer: Lei Xu 
Committed: Tue Sep 19 11:50:01 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bbeacb7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 72b1412..713a10b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -456,8 +456,8 @@ public class TestReconstructStripedFile {
 ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
 fs.getClient().setErasureCodingPolicy("/", policy.getName());
 
-final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2;
-for (int i = 0; i < 100; i++) {
+final int fileLen = cellSize * ecPolicy.getNumDataUnits();
+for (int i = 0; i < 50; i++) {
   writeFile(fs, "/ec-file-" + i, fileLen);
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/50] hadoop git commit: YARN-7149. Cross-queue preemption sometimes starves an underserved queue. (Eric Payne via wangda)

2017-09-21 Thread asuresh
YARN-7149. Cross-queue preemption sometimes starves an underserved queue. (Eric 
Payne via wangda)

Change-Id: Ib269991dbebce160378e8372ee6d24849c4a5ed6
(cherry picked from commit 3dfa937a1fadfc62947755872515f549b3b15e6a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38c14ef8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38c14ef8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38c14ef8

Branch: refs/heads/YARN-6592
Commit: 38c14ef8d8a094a7101917eb77d90f5e62324f61
Parents: 958e8c0
Author: Wangda Tan 
Authored: Fri Sep 15 21:25:21 2017 -0700
Committer: Wangda Tan 
Committed: Fri Sep 15 21:29:39 2017 -0700

--
 .../scheduler/capacity/UsersManager.java|  4 +-
 .../capacity/TestContainerAllocation.java   | 50 
 .../scheduler/capacity/TestLeafQueue.java   |  8 ++--
 3 files changed, 57 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c14ef8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
index 5f7d185..33f30b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UsersManager.java
@@ -731,7 +731,9 @@ public class UsersManager implements AbstractUsersManager {
  * should be higher than queue-hard-limit * ulMin
  */
 float usersSummedByWeight = activeUsersTimesWeights;
-Resource resourceUsed = totalResUsageForActiveUsers.getUsed(nodePartition);
+Resource resourceUsed = Resources.add(
+totalResUsageForActiveUsers.getUsed(nodePartition),
+required);
 
 // For non-activeUser calculation, consider all users count.
 if (!activeUser) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38c14ef8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index dd6b25b..906febf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
@@ -887,4 +888,53 @@ public class TestContainerAllocation {
 
 rm1.close();
   }
+
+
+
+  @Test(timeout = 60000)
+  public void testUserLimitAllocationMultipleContainers() throws Exception {
+CapacitySchedulerConfiguration newConf =
+(CapacitySchedulerConfiguration) TestUtils
+.getConfigurationWithMultipleQueues(conf);
+newConf.setUserLimit("root.c", 50);
+MockRM rm1 = new MockRM(newConf);
+
+rm1.getRMContext().setNodeLabelManager(mgr);
+rm1.start();
+MockNM nm1 = rm1.registerNode("h1:1234", 1000 * GB);
+
+// launch app from 1st user to queue C, AM container should be launched in 
nm1
+

[49/50] hadoop git commit: YARN-4266. Allow users to enter containers as UID:GID pair instead of by username. Contributed by luhuichun, Zhankun Tang, and Eric Badger.

2017-09-21 Thread asuresh
YARN-4266. Allow users to enter containers as UID:GID pair instead of by 
username. Contributed by luhuichun, Zhankun Tang, and Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfd1a72b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfd1a72b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfd1a72b

Branch: refs/heads/YARN-6592
Commit: bfd1a72ba8fbb06da73fede2a85e0b544d6ab43f
Parents: e5e1851
Author: Jason Lowe 
Authored: Thu Sep 21 17:41:34 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 21 17:41:34 2017 -0500

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 ++
 .../src/main/resources/yarn-default.xml | 19 +
 .../runtime/DockerLinuxContainerRuntime.java| 76 ++-
 .../linux/runtime/docker/DockerRunCommand.java  |  7 ++
 .../impl/container-executor.c   |  4 +
 .../runtime/TestDockerContainerRuntime.java | 79 +++-
 6 files changed, 204 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 114453f..f58833c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1587,6 +1587,27 @@ public class YarnConfiguration extends Configuration {
   public static final boolean DEFAULT_NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS =
   false;
 
+  /** enable user remapping. */
+  public static final String NM_DOCKER_ENABLE_USER_REMAPPING =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "enable-userremapping.allowed";
+
+  /** Set enable user remapping as false by default. */
+  public static final boolean DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING = false;
+
+  /** lower limit for acceptable uids of user remapped user. */
+  public static final String NM_DOCKER_USER_REMAPPING_UID_THRESHOLD =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "userremapping-uid-threshold";
+
+  /** Set user remapping lower uid limit to 1 by default. */
+  public static final int DEFAULT_NM_DOCKER_USER_REMAPPING_UID_THRESHOLD = 1;
+
+  /** lower limit for acceptable gids of user remapped user. */
+  public static final String NM_DOCKER_USER_REMAPPING_GID_THRESHOLD =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "userremapping-gid-threshold";
+
+  /** Set user remapping lower gid limit to 1 by default. */
+  public static final int DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD = 1;
+
   /** ACL list for users allowed to run privileged containers. */
   public static final String NM_DOCKER_PRIVILEGED_CONTAINERS_ACL =
   DOCKER_CONTAINER_RUNTIME_PREFIX + "privileged-containers.acl";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 0440458..8453dc7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1643,6 +1643,25 @@
   
 
   
+Property to enable docker user remapping
+
yarn.nodemanager.runtime.linux.docker.enable-userremapping.allowed
+false
+  
+
+  
+lower limit for acceptable uids of user remapped 
user
+
yarn.nodemanager.runtime.linux.docker.userremapping-uid-threshold
+1
+  
+
+
+  
+lower limit for acceptable gids of user remapped 
user
+
yarn.nodemanager.runtime.linux.docker.userremapping-gid-threshold
+1
+  
+
+  
 The mode in which the Java Container Sandbox should run 
detailed by
   the JavaSandboxLinuxContainerRuntime.
 yarn.nodemanager.runtime.linux.sandbox-mode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd1a72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java

[28/50] hadoop git commit: MAPREDUCE-6960. Shuffle Handler prints disk error stack traces for every read failure.

2017-09-21 Thread asuresh
MAPREDUCE-6960. Shuffle Handler prints disk error stack traces for every read 
failure.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/595d4784
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/595d4784
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/595d4784

Branch: refs/heads/YARN-6592
Commit: 595d478408104bdfe1f08efd79930e18862fafbb
Parents: 3a20deb
Author: Eric Payne 
Authored: Tue Sep 19 10:35:15 2017 -0500
Committer: Eric Payne 
Committed: Tue Sep 19 10:35:15 2017 -0500

--
 .../main/java/org/apache/hadoop/mapred/ShuffleHandler.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/595d4784/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index b7f2c6d..0eeae19 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -1088,7 +1089,11 @@ public class ShuffleHandler extends AuxiliaryService {
   }
   nextMap.addListener(new ReduceMapFileCount(reduceContext));
 } catch (IOException e) {
-  LOG.error("Shuffle error :", e);
+  if (e instanceof DiskChecker.DiskErrorException) {
+LOG.error("Shuffle error :" + e);
+  } else {
+LOG.error("Shuffle error :", e);
+  }
   String errorMessage = getErrorMessage(e);
   sendError(reduceContext.getCtx(), errorMessage,
   INTERNAL_SERVER_ERROR);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/50] hadoop git commit: HDFS-12472. Add JUNIT timeout to TestBlockStatsMXBean. Contributed by Bharat Viswanadham.

2017-09-21 Thread asuresh
HDFS-12472. Add JUNIT timeout to TestBlockStatsMXBean. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d7cc22a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d7cc22a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d7cc22a

Branch: refs/heads/YARN-6592
Commit: 8d7cc22ac286302960c7939bc53574cbfeab1846
Parents: 7618fa9
Author: Arpit Agarwal 
Authored: Sat Sep 16 10:09:27 2017 -0700
Committer: Arpit Agarwal 
Committed: Sat Sep 16 10:09:27 2017 -0700

--
 .../hdfs/server/blockmanagement/TestBlockStatsMXBean.java   | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d7cc22a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index bcf38d6..64364cb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -41,8 +41,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 import org.eclipse.jetty.util.ajax.JSON;
+import org.junit.rules.Timeout;
 
 /**
  * Class for testing {@link BlockStatsMXBean} implementation
@@ -51,6 +53,9 @@ public class TestBlockStatsMXBean {
 
   private MiniDFSCluster cluster;
 
+  @Rule
+  public Timeout globalTimeout = new Timeout(30);
+
   @Before
   public void setup() throws IOException {
 HdfsConfiguration conf = new HdfsConfiguration();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/50] hadoop git commit: HDFS-11799. Introduce a config to allow setting up write pipeline with fewer nodes than replication factor. Contributed by Brahma Reddy Battula

2017-09-21 Thread asuresh
HDFS-11799. Introduce a config to allow setting up write pipeline with fewer 
nodes than replication factor. Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fda1221c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fda1221c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fda1221c

Branch: refs/heads/YARN-6592
Commit: fda1221c55101d97ac62e1ee4e3ddf9a915d5363
Parents: 31b5840
Author: Brahma Reddy Battula 
Authored: Tue Sep 19 11:25:45 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Sep 19 11:25:45 2017 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  13 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java|  31 +-
 .../hdfs/client/HdfsClientConfigKeys.java   |   2 +
 .../src/main/resources/hdfs-default.xml |  17 ++
 .../TestReplaceDatanodeFailureReplication.java  | 291 +++
 .../hadoop/tools/TestHdfsConfigFields.java  |   4 +-
 6 files changed, 354 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 772049d..7e8e95b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -223,6 +223,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   final String clientName;
   final SocketFactory socketFactory;
   final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
+  final short dtpReplaceDatanodeOnFailureReplication;
   private final FileSystem.Statistics stats;
   private final URI namenodeUri;
   private final Random r = new Random();
@@ -305,7 +306,17 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
 this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
 this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
-
+this.dtpReplaceDatanodeOnFailureReplication = (short) conf
+.getInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+MIN_REPLICATION,
+HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+MIN_REPLICATION_DEFAULT);
+if (LOG.isDebugEnabled()) {
+  LOG.debug(
+  "Sets " + HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+  MIN_REPLICATION + " to "
+  + dtpReplaceDatanodeOnFailureReplication);
+}
 this.ugi = UserGroupInformation.getCurrentUser();
 
 this.namenodeUri = nameNodeUri;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fda1221c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 4eafca1..99fa5f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1384,7 +1384,36 @@ class DataStreamer extends Daemon {
   setPipeline(lb);
 
   //find the new datanode
-  final int d = findNewDatanode(original);
+  final int d;
+  try {
+d = findNewDatanode(original);
+  } catch (IOException ioe) {
+// check the minimal number of nodes available to decide whether to
+// continue the write.
+
+//if live block location datanodes is greater than or equal to
+// HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+// MIN_REPLICATION threshold value, continue writing to the
+// remaining nodes. Otherwise throw exception.
+//
+// If HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
+// MIN_REPLICATION is set to 0 or less than zero, an exception will be
+// thrown if a replacement could not be found.
+
+if (dfsClient.dtpReplaceDatanodeOnFailureReplication > 0 && 
nodes.length
+>= dfsClient.dtpReplaceDatanodeOnFailureReplication) {
+  DFSClient.LOG.warn(
+  "Failed to find a new datanode 

[21/50] hadoop git commit: MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergely Novák.

2017-09-21 Thread asuresh
MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. 
Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2018538f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2018538f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2018538f

Branch: refs/heads/YARN-6592
Commit: 2018538fdba1a95a6556187569e872fce7f9e1c3
Parents: 56ef527
Author: Akira Ajisaka 
Authored: Tue Sep 19 11:05:54 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Sep 19 11:05:54 2017 +0900

--
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 ---
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 ---
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 +++
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 ---
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 ---
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 9 files changed, 31 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index 7e98d7d..da4ec79 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,8 +29,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,6 +49,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -83,7 +83,8 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2018538f/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 8dec39d..7b73820 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,8 +29,6 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -49,6 +47,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.hsqldb.server.Server;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This is a demonstrative program, which uses DBInputFormat for reading
@@ -77,7 +77,8 @@ import org.hsqldb.server.Server;
  */
 public class DBCountPageView extends Configured implements Tool {
 
-  private static final Log 

[38/50] hadoop git commit: HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. Contributed by SammiChen.

2017-09-21 Thread asuresh
HDFS-12447. Rename AddECPolicyResponse to AddErasureCodingPolicyResponse. 
Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a12f09ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a12f09ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a12f09ba

Branch: refs/heads/YARN-6592
Commit: a12f09ba3c4a3aa4c4558090c5e1b7bcaebe3b94
Parents: ce943eb
Author: Andrew Wang 
Authored: Wed Sep 20 11:51:17 2017 -0700
Committer: Andrew Wang 
Committed: Wed Sep 20 11:51:17 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 10 +--
 .../hadoop/hdfs/DistributedFileSystem.java  |  4 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java|  4 +-
 .../hdfs/protocol/AddECPolicyResponse.java  | 68 
 .../AddErasureCodingPolicyResponse.java | 68 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java | 11 ++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 ---
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../src/main/proto/hdfs.proto   |  2 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 13 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 15 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  4 +-
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  7 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 25 +++
 .../server/namenode/TestFSEditLogLoader.java|  4 +-
 .../hdfs/server/namenode/TestFSImage.java   |  5 +-
 19 files changed, 147 insertions(+), 135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7e8e95b..8d51a9c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -102,7 +102,7 @@ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2807,13 +2807,14 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public AddECPolicyResponse[] addErasureCodingPolicies(
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
   ErasureCodingPolicy[] policies) throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("addErasureCodingPolicies")) {
   return namenode.addErasureCodingPolicies(policies);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 
@@ -2823,7 +2824,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 try (TraceScope ignored = tracer.newScope("removeErasureCodingPolicy")) {
   namenode.removeErasureCodingPolicy(ecPolicyName);
 } catch (RemoteException re) {
-  throw re.unwrapRemoteException(AccessControlException.class);
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a12f09ba/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f6331cf..c9f4490 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ 

[07/50] hadoop git commit: HADOOP-13714. Tighten up our compatibility guidelines for Hadoop 3

2017-09-21 Thread asuresh
HADOOP-13714. Tighten up our compatibility guidelines for Hadoop 3


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7618fa91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7618fa91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7618fa91

Branch: refs/heads/YARN-6592
Commit: 7618fa9194b40454405f11a25bec4e2d79506912
Parents: 38c14ef
Author: Daniel Templeton 
Authored: Sat Sep 16 09:20:33 2017 +0200
Committer: Daniel Templeton 
Committed: Sat Sep 16 09:20:33 2017 +0200

--
 .../src/site/markdown/Compatibility.md  | 645 +++
 .../site/markdown/InterfaceClassification.md| 227 ---
 2 files changed, 675 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7618fa91/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index 05b18b5..4fa8c02 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -20,109 +20,276 @@ Apache Hadoop Compatibility
 Purpose
 ---
 
-This document captures the compatibility goals of the Apache Hadoop project. 
The different types of compatibility between Hadoop releases that affects 
Hadoop developers, downstream projects, and end-users are enumerated. For each 
type of compatibility we:
+This document captures the compatibility goals of the Apache Hadoop project.
+The different types of compatibility between Hadoop releases that affect
+Hadoop developers, downstream projects, and end-users are enumerated. For each
+type of compatibility this document will:
 
 * describe the impact on downstream projects or end-users
 * where applicable, call out the policy adopted by the Hadoop developers when 
incompatible changes are permitted.
 
+All Hadoop interfaces are classified according to the intended audience and
+stability in order to maintain compatibility with previous releases. See the
+[Hadoop Interface Taxonomy](./InterfaceClassification.html) for details
+about the classifications.
+
+### Target Audience
+
+This document is intended for consumption by the Hadoop developer community.
+This document describes the lens through which changes to the Hadoop project
+should be viewed. In order for end users and third party developers to have
+confidence about cross-release compatibility, the developer community must
+ensure that development efforts adhere to these policies. It is the
+responsibility of the project committers to validate that all changes either
+maintain compatibility or are explicitly marked as incompatible.
+
+Within a component Hadoop developers are free to use Private and Limited 
Private
+APIs, but when using components from a different module Hadoop developers
+should follow the same guidelines as third-party developers: do not
+use Private or Limited Private (unless explicitly allowed) interfaces and
+prefer instead Stable interfaces to Evolving or Unstable interfaces where
+possible. Where not possible, the preferred solution is to expand the audience
+of the API rather than introducing or perpetuating an exception to these
+compatibility guidelines. When working within a Maven module Hadoop developers
+should observe where possible the same level of restraint with regard to
+using components located in other Maven modules.
+
+Above all, Hadoop developers must be mindful of the impact of their changes.
+Stable interfaces must not change between major releases. Evolving interfaces
+must not change between minor releases. New classes and components must be
+labeled appropriately for audience and stability. See the
+[Hadoop Interface Taxonomy](./InterfaceClassification.html) for details about
+when the various labels are appropriate. As a general rule, all new interfaces
+and APIs should have the most limited labels (e.g. Private Unstable) that will
+not inhibit the intent of the interface or API.
+
+### Notational Conventions
+
+The key words "MUST" "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",
+"SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" are to be interpreted as
+described in [RFC 2119](http://tools.ietf.org/html/rfc2119).
+
+Deprecation
+---
+
+The Java API provides a @Deprecated annotation to mark an API element as
+flagged for removal. The standard meaning of the annotation is that the
+API element should not be used and may be removed in a later version.
+
+In all cases removing an element from an API is an incompatible
+change. In the case of 

[12/50] hadoop git commit: HDFS-12470. DiskBalancer: Some tests create plan files under system directory. Contributed by Hanisha Koneru.

2017-09-21 Thread asuresh
HDFS-12470. DiskBalancer: Some tests create plan files under system directory. 
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2dcba18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2dcba18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2dcba18

Branch: refs/heads/YARN-6592
Commit: a2dcba18531c6fa4b76325f5132773f12ddfc6d5
Parents: a4f9c7c
Author: Arpit Agarwal 
Authored: Mon Sep 18 09:53:24 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Sep 18 09:53:24 2017 -0700

--
 .../server/diskbalancer/command/TestDiskBalancerCommand.java| 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2dcba18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index b0b0b0c..1cebae0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -476,9 +476,12 @@ public class TestDiskBalancerCommand {
   public void testPlanJsonNode() throws Exception {
 final String planArg = String.format("-%s %s", PLAN,
 "a87654a9-54c7-4693-8dd9-c9c7021dc340");
+final Path testPath = new Path(
+PathUtils.getTestPath(getClass()),
+GenericTestUtils.getMethodName());
 final String cmdLine = String
 .format(
-"hdfs diskbalancer %s", planArg);
+"hdfs diskbalancer -out %s %s", testPath, planArg);
 runCommand(cmdLine);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] hadoop git commit: HDFS-12526. FSDirectory should use Time.monotonicNow for durations. Contributed by Bharat Viswanadham.

2017-09-21 Thread asuresh
HDFS-12526. FSDirectory should use Time.monotonicNow for durations. Contributed 
by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50849ec9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50849ec9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50849ec9

Branch: refs/heads/YARN-6592
Commit: 50849ec9eb9bad90586a95d2b2380ee6e8724d6b
Parents: 8b33663
Author: Akira Ajisaka 
Authored: Thu Sep 21 19:27:48 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Sep 21 19:27:48 2017 +0900

--
 .../java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50849ec9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 6604b5a..3c55112 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -752,7 +752,7 @@ public class FSDirectory implements Closeable {
 try {
   int threads = (initThreads < 1) ? 1 : initThreads;
   LOG.info("Initializing quota with " + threads + " thread(s)");
-  long start = Time.now();
+  long start = Time.monotonicNow();
   QuotaCounts counts = new QuotaCounts.Builder().build();
   ForkJoinPool p = new ForkJoinPool(threads);
   RecursiveAction task = new InitQuotaTask(getBlockStoragePolicySuite(),
@@ -760,7 +760,7 @@ public class FSDirectory implements Closeable {
   p.execute(task);
   task.join();
   p.shutdown();
-  LOG.info("Quota initialization completed in " + (Time.now() - start) +
+  LOG.info("Quota initialization completed in " + (Time.monotonicNow() - 
start) +
   " milliseconds\n" + counts);
 } finally {
   writeUnlock();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] hadoop git commit: MAPREDUCE-6964. BaileyBorweinPlouffe should use Time.monotonicNow for measuring durations. Contributed by Chetna Chaudhari

2017-09-21 Thread asuresh
MAPREDUCE-6964. BaileyBorweinPlouffe should use Time.monotonicNow for measuring 
durations. Contributed by Chetna Chaudhari


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9db0afa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9db0afa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9db0afa

Branch: refs/heads/YARN-6592
Commit: b9db0afa1e65d0343ad4e5760c36e042a3e704b0
Parents: 6bf921a
Author: Jason Lowe 
Authored: Thu Sep 21 09:37:19 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 21 09:37:19 2017 -0500

--
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9db0afa/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index 7e98d7d..b9987a5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.Reducer;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -379,7 +380,7 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
 
 // start a map/reduce job
 out.println("\nStarting Job ...");
-final long startTime = System.currentTimeMillis();
+final long startTime = Time.monotonicNow();
 try {
   if (!job.waitForCompletion(true)) {
 out.println("Job failed.");
@@ -388,7 +389,7 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
 } catch (Exception e) {
   throw new RuntimeException(e);
 } finally {
-  final double duration = (System.currentTimeMillis() - startTime)/1000.0;
+  final double duration = (Time.monotonicNow() - startTime)/1000.0;
   out.println("Duration is " + duration + " seconds.");
 }
 out.println("Output file: " + hexfile);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/50] hadoop git commit: YARN-6991. "Kill application" button does not show error if other user tries to kill the application for secure cluster. (Suma Shivaprasad via wangda)

2017-09-21 Thread asuresh
YARN-6991. "Kill application" button does not show error if other user tries to 
kill the application for secure cluster. (Suma Shivaprasad via wangda)

Change-Id: I7e7894b24609709f89064ee5882f055dbb09080b


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/263e2c69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/263e2c69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/263e2c69

Branch: refs/heads/YARN-6592
Commit: 263e2c692a4b0013766cd1f6b6d7ed674b2b1040
Parents: b9e423f
Author: Wangda Tan 
Authored: Thu Sep 21 12:00:53 2017 -0700
Committer: Wangda Tan 
Committed: Thu Sep 21 12:00:53 2017 -0700

--
 .../hadoop/yarn/server/webapp/AppBlock.java | 44 +++-
 1 file changed, 24 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/263e2c69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 08e75ac..b429b5d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -137,6 +137,30 @@ public class AppBlock extends HtmlBlock {
 
 setTitle(join("Application ", aid));
 
+//Validate if able to read application attempts
+// which should also validate if kill is allowed for the user based on ACLs
+
+Collection attempts;
+try {
+  final GetApplicationAttemptsRequest request =
+  GetApplicationAttemptsRequest.newInstance(appID);
+  attempts = callerUGI.doAs(
+  new PrivilegedExceptionAction>() {
+@Override
+public Collection run() throws Exception 
{
+  return getApplicationAttemptsReport(request);
+}
+  });
+} catch (Exception e) {
+  String message =
+  "Failed to read the attempts of the application " + appID + ".";
+  LOG.error(message, e);
+  html.p().__(message).__();
+  return;
+}
+
+
 // YARN-6890. for secured cluster allow anonymous UI access, application 
kill
 // shouldn't be there.
 boolean unsecuredUIForSecuredCluster = 
UserGroupInformation.isSecurityEnabled()
@@ -183,26 +207,6 @@ public class AppBlock extends HtmlBlock {
 
 generateOverviewTable(app, schedulerPath, webUiType, appReport);
 
-Collection attempts;
-try {
-  final GetApplicationAttemptsRequest request =
-  GetApplicationAttemptsRequest.newInstance(appID);
-  attempts = callerUGI.doAs(
-  new PrivilegedExceptionAction>() {
-@Override
-public Collection run() throws Exception 
{
-  return getApplicationAttemptsReport(request);
-}
-  });
-} catch (Exception e) {
-  String message =
-  "Failed to read the attempts of the application " + appID + ".";
-  LOG.error(message, e);
-  html.p().__(message).__();
-  return;
-}
-
 createApplicationMetricsTable(html);
 
 html.__(InfoBlock.class);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] hadoop git commit: HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)

2017-09-21 Thread asuresh
HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ee25278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ee25278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ee25278

Branch: refs/heads/YARN-6592
Commit: 1ee25278c891e95ba2ab142e5b78aebd752ea163
Parents: 7c73292
Author: Haibo Chen 
Authored: Mon Sep 18 14:25:35 2017 -0700
Committer: Haibo Chen 
Committed: Mon Sep 18 14:25:35 2017 -0700

--
 hadoop-client-modules/hadoop-client/pom.xml | 31 
 1 file changed, 31 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ee25278/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index bed3f5c..6500ebf 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,6 +179,37 @@
 
 
   org.apache.hadoop
+  hadoop-yarn-client
+  compile
+  
+
+
+  org.apache.hadoop
+  hadoop-yarn-api
+
+
+  org.apache.hadoop
+  hadoop-annotations
+
+
+  com.google.guava
+  guava
+
+
+  commons-cli
+  commons-cli
+
+
+  log4j
+  log4j
+
+  
+
+
+
+  org.apache.hadoop
   hadoop-mapreduce-client-core
   compile
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[30/50] hadoop git commit: HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng Wang and Andrew Wang.

2017-09-21 Thread asuresh
HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng 
Wang and Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59830ca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59830ca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59830ca7

Branch: refs/heads/YARN-6592
Commit: 59830ca772dfb5dcc8b3e5281ca482dea5a5fa3e
Parents: 7bbeacb
Author: Andrew Wang 
Authored: Tue Sep 19 13:44:42 2017 -0700
Committer: Andrew Wang 
Committed: Tue Sep 19 13:44:42 2017 -0700

--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 13 +++
 .../hadoop/hdfs/TestWriteReadStripedFile.java   | 24 
 .../hdfs/TestWriteStripedFileWithFailure.java   |  3 ++-
 3 files changed, 25 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 1489e48..c771d21 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -79,10 +79,15 @@ public class StripedFileTestUtil {
 assertEquals("File length should be the same", fileLength, 
status.getLen());
   }
 
-  static void verifyPread(FileSystem fs, Path srcPath,  int fileLength,
-  byte[] expected, byte[] buf) throws IOException {
-final ErasureCodingPolicy ecPolicy =
-((DistributedFileSystem)fs).getErasureCodingPolicy(srcPath);
+  static void verifyPread(DistributedFileSystem fs, Path srcPath,
+  int fileLength, byte[] expected, byte[] buf) throws IOException {
+final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(srcPath);
+verifyPread(fs, srcPath, fileLength, expected, buf, ecPolicy);
+  }
+
+  static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
+  byte[] expected, byte[] buf, ErasureCodingPolicy ecPolicy)
+  throws IOException {
 try (FSDataInputStream in = fs.open(srcPath)) {
   int[] startOffsets = {0, 1, ecPolicy.getCellSize() - 102,
   ecPolicy.getCellSize(), ecPolicy.getCellSize() + 102,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59830ca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index f27c978..805bcea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
@@ -47,12 +48,13 @@ import java.util.Random;
 public class TestWriteReadStripedFile {
   public static final Log LOG = 
LogFactory.getLog(TestWriteReadStripedFile.class);
   private final ErasureCodingPolicy ecPolicy =
-  StripedFileTestUtil.getDefaultECPolicy();
+  SystemErasureCodingPolicies.getByID(
+  SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
   private final int cellSize = ecPolicy.getCellSize();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
   private final int numDNs = dataBlocks + parityBlocks;
-  private final int stripesPerBlock = 4;
+  private final int stripesPerBlock = 2;
   private final int blockSize = stripesPerBlock * cellSize;
   private final int blockGroupSize = blockSize * dataBlocks;
 
@@ -78,11 +80,10 @@ public class TestWriteReadStripedFile {
 false);
 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
 fs = cluster.getFileSystem();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
+

[10/50] hadoop git commit: HDFS-12460. Make addErasureCodingPolicy an idempotent operation. Contributed by Sammi Chen

2017-09-21 Thread asuresh
HDFS-12460. Make addErasureCodingPolicy an idempotent operation. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f9af246
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f9af246
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f9af246

Branch: refs/heads/YARN-6592
Commit: 0f9af246e89e4ad3c4d7ff2c1d7ec9b397494a03
Parents: e81596d
Author: Kai Zheng 
Authored: Mon Sep 18 18:07:12 2017 +0800
Committer: Kai Zheng 
Committed: Mon Sep 18 18:07:12 2017 +0800

--
 .../hdfs/server/namenode/ErasureCodingPolicyManager.java  | 7 ---
 .../org/apache/hadoop/hdfs/TestErasureCodingPolicies.java | 2 +-
 .../hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java   | 4 ++--
 .../hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java  | 4 ++--
 4 files changed, 9 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 3a46c30..90699b4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -242,14 +242,15 @@ public final class ErasureCodingPolicyManager {
 policy.getSchema(), policy.getCellSize());
 for (ErasureCodingPolicy p : getPolicies()) {
   if (p.getName().equals(assignedNewName)) {
-throw new HadoopIllegalArgumentException("The policy name " +
-assignedNewName + " already exists");
+LOG.info("The policy name " + assignedNewName + " already exists");
+return p;
   }
   if (p.getSchema().equals(policy.getSchema()) &&
   p.getCellSize() == policy.getCellSize()) {
-throw new HadoopIllegalArgumentException("A policy with same schema "
+LOG.info("A policy with same schema "
 + policy.getSchema().toString() + " and cell size "
 + p.getCellSize() + " already exists");
+return p;
   }
 }
 policy.setName(assignedNewName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 19277c4..4f2040b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -718,7 +718,7 @@ public class TestErasureCodingPolicies {
 policyArray  = new ErasureCodingPolicy[]{policy0};
 responses = fs.addErasureCodingPolicies(policyArray);
 assertEquals(1, responses.length);
-assertFalse(responses[0].isSucceed());
+assertTrue(responses[0].isSucceed());
 
 // Test add policy successfully
 newPolicy =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f9af246/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index d217813..42ff698 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -436,7 +436,7 @@ public class TestNamenodeRetryCache {
 
 LightWeightCache cacheSet = 
 (LightWeightCache) 
namesystem.getRetryCache().getCacheSet();
-assertEquals("Retry cache size is wrong", 26, cacheSet.size());
+assertEquals("Retry cache size is wrong", 34, cacheSet.size());
 
 Map oldEntries = 
 new 

[04/50] hadoop git commit: HDFS-10701. TestDFSStripedOutputStreamWithFailure#testBlockTokenExpired occasionally fails. Contributed by SammiChen.

2017-09-21 Thread asuresh
HDFS-10701. TestDFSStripedOutputStreamWithFailure#testBlockTokenExpired 
occasionally fails. Contributed by SammiChen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef8cd5dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef8cd5dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef8cd5dc

Branch: refs/heads/YARN-6592
Commit: ef8cd5dc565f901b4954befe784675e130e84c3c
Parents: 1a84c24
Author: Andrew Wang 
Authored: Fri Sep 15 16:20:36 2017 -0700
Committer: Andrew Wang 
Committed: Fri Sep 15 16:20:36 2017 -0700

--
 .../hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef8cd5dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index ea889e3..57da439 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -260,8 +260,6 @@ public class TestDFSStripedOutputStreamWithFailure {
 
   @Test(timeout=24)
   public void testBlockTokenExpired() throws Exception {
-// TODO: this is very flaky, re-enable it later. See HDFS-12417.
-assumeTrue("Test has been temporarily disabled. See HDFS-12417.", false);
 final int length = dataBlocks * (blockSize - cellSize);
 final HdfsConfiguration conf = newHdfsConfiguration();
 
@@ -494,8 +492,8 @@ public class TestDFSStripedOutputStreamWithFailure {
   final BlockManager bm = nn.getNamesystem().getBlockManager();
   final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
 
-  // set a short token lifetime (1 second)
-  SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
+  // set a short token lifetime (6 second)
+  SecurityTestUtil.setBlockTokenLifetime(sm, 6000L);
 }
 
 final AtomicInteger pos = new AtomicInteger();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[35/50] hadoop git commit: HADOOP-7308. Remove unused TaskLogAppender configurations from log4j.properties. Contributed by Todd Lipcon and J.Andreina.

2017-09-21 Thread asuresh
HADOOP-7308. Remove unused TaskLogAppender configurations from 
log4j.properties. Contributed by Todd Lipcon and J.Andreina.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e58b247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e58b247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e58b247

Branch: refs/heads/YARN-6592
Commit: 7e58b2478ce10f54b9b9a647f22a69dd528a81e6
Parents: a9019e1
Author: Akira Ajisaka 
Authored: Wed Sep 20 21:07:45 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Sep 20 21:07:49 2017 +0900

--
 .../hadoop-common/src/main/conf/log4j.properties| 12 
 1 file changed, 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e58b247/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index bc1fa6c..5f4b22b 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -78,19 +78,7 @@ log4j.appender.console.layout.ConversionPattern=%d{ISO8601} 
%p %c{2}: %m%n
 #
 # TaskLog Appender
 #
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
 log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 
 log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[34/50] hadoop git commit: YARN-7186. Fix finicky TestContainerManager tests. Contributed by Arun Suresh.

2017-09-21 Thread asuresh
YARN-7186. Fix finicky TestContainerManager tests. Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/647b7527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/647b7527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/647b7527

Branch: refs/heads/YARN-6592
Commit: 647b7527a9cdf4717e7dcbbb660e5812b67a17f1
Parents: 12d9d7b
Author: Junping Du 
Authored: Tue Sep 19 18:31:15 2017 -0700
Committer: Junping Du 
Committed: Tue Sep 19 18:31:15 2017 -0700

--
 .../containermanager/TestContainerManager.java  | 128 ---
 .../TestContainerSchedulerQueuing.java  |  70 ++
 2 files changed, 70 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/647b7527/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
index 6eea77b..38df208 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
@@ -70,7 +70,6 @@ import 
org.apache.hadoop.yarn.api.records.ContainerRetryContext;
 import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -105,7 +104,6 @@ import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerSignalContext
 import 
org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
@@ -142,14 +140,6 @@ public class TestContainerManager extends 
BaseContainerManagerTest {
 exec.setConf(conf);
 return spy(exec);
   }
-
-  @Override
-  @Before
-  public void setup() throws IOException {
-conf.setInt(
-YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 0);
-super.setup();
-  }
   
   @Override
   protected ContainerManagerImpl
@@ -1945,122 +1935,4 @@ public class TestContainerManager extends 
BaseContainerManagerTest {
 Assert.assertTrue(response.getFailedRequests().get(cId).getMessage()
 .contains("Null resource visibility for local resource"));
   }
-
-  @Test
-  public void testContainerUpdateExecTypeOpportunisticToGuaranteed()
-  throws IOException, YarnException, InterruptedException {
-delayContainers = true;
-containerManager.start();
-// Construct the Container-id
-ContainerId cId = createContainerId(0);
-ContainerLaunchContext containerLaunchContext =
-recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
-StartContainerRequest scRequest =
-StartContainerRequest.newInstance(
-containerLaunchContext,
-createContainerToken(cId, DUMMY_RM_IDENTIFIER,
-context.getNodeId(), user, BuilderUtils.newResource(512, 1),
-context.getContainerTokenSecretManager(), null,
-ExecutionType.OPPORTUNISTIC));
-List list = new ArrayList<>();
-list.add(scRequest);
-StartContainersRequest allRequests =
-StartContainersRequest.newInstance(list);
-containerManager.startContainers(allRequests);
-// Make sure the container reaches RUNNING state
-BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
-org.apache.hadoop.yarn.server.nodemanager.
-containermanager.container.ContainerState.RUNNING);
-// Construct container resource increase request,
-List updateTokens = new ArrayList<>();
-Token containerToken =
-createContainerToken(cId, 1, DUMMY_RM_IDENTIFIER, 

[43/50] hadoop git commit: HDFS-12371. BlockVerificationFailures and BlocksVerified show up as 0 in Datanode JMX. Contributed by Hanisha Koneru.

2017-09-21 Thread asuresh
HDFS-12371. BlockVerificationFailures and BlocksVerified show up as 0 in 
Datanode JMX. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bf921a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bf921a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bf921a5

Branch: refs/heads/YARN-6592
Commit: 6bf921a5c3152a307b5c0903056d73ce07775a08
Parents: 10d7493
Author: Kihwal Lee 
Authored: Thu Sep 21 08:42:50 2017 -0500
Committer: Kihwal Lee 
Committed: Thu Sep 21 08:42:50 2017 -0500

--
 .../org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf921a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 8b29fce..181ef80 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -37,6 +37,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
@@ -81,6 +82,8 @@ public class VolumeScanner extends Thread {
*/
   private final DataNode datanode;
 
+  private final DataNodeMetrics metrics;
+
   /**
* A reference to the volume that we're scanning.
*/
@@ -299,6 +302,7 @@ public class VolumeScanner extends Thread {
   VolumeScanner(Conf conf, DataNode datanode, FsVolumeReference ref) {
 this.conf = conf;
 this.datanode = datanode;
+this.metrics = datanode.getMetrics();
 this.ref = ref;
 this.volume = ref.getVolume();
 ScanResultHandler handler;
@@ -443,12 +447,14 @@ public class VolumeScanner extends Thread {
   throttler.setBandwidth(bytesPerSec);
   long bytesRead = blockSender.sendBlock(nullStream, null, throttler);
   resultHandler.handle(block, null);
+  metrics.incrBlocksVerified();
   return bytesRead;
 } catch (IOException e) {
   resultHandler.handle(block, e);
 } finally {
   IOUtils.cleanup(null, blockSender);
 }
+metrics.incrBlockVerificationFailures();
 return -1;
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/50] hadoop git commit: Revert "YARN-7162. Remove XML excludes file format (rkanter)" - wrong commit message

2017-09-21 Thread asuresh
Revert "YARN-7162. Remove XML excludes file format (rkanter)" - wrong commit 
message

This reverts commit 3a8d57a0a2e047b34be82f602a2b6cf5593d2125.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f496683
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f496683
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f496683

Branch: refs/heads/YARN-6592
Commit: 5f496683fb00ba26a6bf5a506ae87d4bc4088727
Parents: a2dcba1
Author: Robert Kanter 
Authored: Mon Sep 18 10:32:08 2017 -0700
Committer: Robert Kanter 
Committed: Mon Sep 18 10:32:08 2017 -0700

--
 .../hadoop-mapreduce-client-core/pom.xml|  4 --
 .../hadoop/mapreduce/JobResourceUploader.java   | 17 
 .../apache/hadoop/mapreduce/MRJobConfig.java|  5 ---
 .../src/main/resources/mapred-default.xml   |  9 
 .../mapreduce/TestJobResourceUploader.java  | 46 
 5 files changed, 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index ce5fdc8..c34f7bd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -44,10 +44,6 @@
 
 
   org.apache.hadoop
-  hadoop-hdfs-client
-
-
-  org.apache.hadoop
   hadoop-hdfs
   test
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index d9bf988..f1cad57 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -36,8 +36,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 
@@ -96,11 +94,6 @@ class JobResourceUploader {
 new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
 mkdirs(jtFs, submitJobDir, mapredSysPerms);
 
-if (!conf.getBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
-MRJobConfig.DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED)) {
-  disableErasureCodingForPath(jtFs, submitJobDir);
-}
-
 Collection files = conf.getStringCollection("tmpfiles");
 Collection libjars = conf.getStringCollection("tmpjars");
 Collection archives = conf.getStringCollection("tmparchives");
@@ -582,14 +575,4 @@ class JobResourceUploader {
 }
 return finalPath;
   }
-
-  private void disableErasureCodingForPath(FileSystem fs, Path path)
-  throws IOException {
-if (jtFs instanceof DistributedFileSystem) {
-  LOG.info("Disabling Erasure Coding for path: " + path);
-  DistributedFileSystem dfs = (DistributedFileSystem) jtFs;
-  dfs.setErasureCodingPolicy(path,
-  SystemErasureCodingPolicies.getReplicationPolicy().getName());
-}
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f496683/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 86abb42..2023ba3 100644
--- 

[16/50] hadoop git commit: YARN-6570. No logs were found for running application, running container. Contributed by Junping Du

2017-09-21 Thread asuresh
YARN-6570. No logs were found for running application, running
container. Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c732924
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c732924
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c732924

Branch: refs/heads/YARN-6592
Commit: 7c732924a889cd280e972882619a1827877fbafa
Parents: 29dd551
Author: Xuan 
Authored: Mon Sep 18 14:04:05 2017 -0700
Committer: Xuan 
Committed: Mon Sep 18 14:04:05 2017 -0700

--
 .../nodemanager/containermanager/container/ContainerImpl.java | 1 +
 .../org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java  | 3 ++-
 .../nodemanager/containermanager/container/TestContainer.java | 3 +++
 3 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index df107a7..836e70e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -616,6 +616,7 @@ public class ContainerImpl implements Container {
   public org.apache.hadoop.yarn.api.records.ContainerState getCurrentState() {
 switch (stateMachine.getCurrentState()) {
 case NEW:
+  return org.apache.hadoop.yarn.api.records.ContainerState.NEW;
 case LOCALIZING:
 case LOCALIZATION_FAILED:
 case SCHEDULED:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
index 8e4522b..9e59449 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
@@ -159,7 +159,8 @@ public class TestEventFlow {
 containerManager.startContainers(allRequests);
 
 BaseContainerManagerTest.waitForContainerState(containerManager, cID,
-Arrays.asList(ContainerState.RUNNING, ContainerState.SCHEDULED), 20);
+Arrays.asList(ContainerState.RUNNING, ContainerState.SCHEDULED,
+ContainerState.NEW), 20);
 
 List containerIds = new ArrayList();
 containerIds.add(cID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c732924/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
index 64e6cf0..b44b500 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java
+++ 

[40/50] hadoop git commit: YARN-6771. Use classloader inside configuration class to make new classes. Contributed by Jongyoul Lee.

2017-09-21 Thread asuresh
YARN-6771. Use classloader inside configuration class to make new
classes. Contributed by Jongyoul Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b336632
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b336632
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b336632

Branch: refs/heads/YARN-6592
Commit: 8b336632acad10e45d029596c5e3196e1857d891
Parents: 53047f9
Author: Sangjin Lee 
Authored: Wed Sep 20 22:22:43 2017 -0700
Committer: Sangjin Lee 
Committed: Wed Sep 20 22:22:43 2017 -0700

--
 .../impl/pb/RpcClientFactoryPBImpl.java |  3 +-
 .../impl/pb/RpcServerFactoryPBImpl.java |  5 +-
 .../impl/pb/TestRpcClientFactoryPBImpl.java | 49 
 .../impl/pb/TestRpcServerFactoryPBImpl.java | 48 +++
 4 files changed, 100 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
index 062fa66..07c5e23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcClientFactoryPBImpl.java
@@ -45,7 +45,6 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
   private static final String PB_IMPL_CLASS_SUFFIX = "PBClientImpl";
   
   private static final RpcClientFactoryPBImpl self = new 
RpcClientFactoryPBImpl();
-  private Configuration localConf = new Configuration();
   private ConcurrentMap cache = new 
ConcurrentHashMap();
   
   public static RpcClientFactoryPBImpl get() {
@@ -62,7 +61,7 @@ public class RpcClientFactoryPBImpl implements 
RpcClientFactory {
 if (constructor == null) {
   Class pbClazz = null;
   try {
-pbClazz = localConf.getClassByName(getPBImplClassName(protocol));
+pbClazz = conf.getClassByName(getPBImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
 + getPBImplClassName(protocol) + "]", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b336632/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
index 60e549a..ec9a5f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/factories/impl/pb/RpcServerFactoryPBImpl.java
@@ -51,7 +51,6 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
   
   private static final RpcServerFactoryPBImpl self = new 
RpcServerFactoryPBImpl();
 
-  private Configuration localConf = new Configuration();
   private ConcurrentMap serviceCache = new 
ConcurrentHashMap();
   private ConcurrentMap protoCache = new 
ConcurrentHashMap();
   
@@ -80,7 +79,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (constructor == null) {
   Class pbServiceImplClazz = null;
   try {
-pbServiceImplClazz = localConf
+pbServiceImplClazz = conf
 .getClassByName(getPbServiceImplClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed to load class: ["
@@ -113,7 +112,7 @@ public class RpcServerFactoryPBImpl implements 
RpcServerFactory {
 if (method == null) {
   Class protoClazz = null;
   try {
-protoClazz = localConf.getClassByName(getProtoClassName(protocol));
+protoClazz = conf.getClassByName(getProtoClassName(protocol));
   } catch (ClassNotFoundException e) {
 throw new YarnRuntimeException("Failed 

[50/50] hadoop git commit: YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

2017-09-21 Thread asuresh
YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos 
via wangda)

Change-Id: Id00edb7185fdf01cce6e40f920cac3585f8cbe9c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0e16d41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0e16d41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0e16d41

Branch: refs/heads/YARN-6592
Commit: a0e16d41d0d76306eef05e267934d8c2eef15cf6
Parents: bfd1a72
Author: Wangda Tan 
Authored: Thu Aug 3 14:03:55 2017 -0700
Committer: Arun Suresh 
Committed: Thu Sep 21 18:51:50 2017 -0700

--
 .../yarn/api/resource/PlacementConstraint.java  | 567 +++
 .../yarn/api/resource/PlacementConstraints.java | 286 ++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../src/main/proto/yarn_protos.proto|  55 ++
 .../api/resource/TestPlacementConstraints.java  | 106 
 .../PlacementConstraintFromProtoConverter.java  | 116 
 .../pb/PlacementConstraintToProtoConverter.java | 174 ++
 .../apache/hadoop/yarn/api/pb/package-info.java |  23 +
 .../yarn/api/records/impl/pb/ProtoUtils.java|  27 +
 .../PlacementConstraintTransformations.java | 209 +++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../TestPlacementConstraintPBConversion.java| 195 +++
 .../TestPlacementConstraintTransformations.java | 183 ++
 13 files changed, 1987 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0e16d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
new file mode 100644
index 000..f0e3982
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -0,0 +1,567 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * {@code PlacementConstraint} represents a placement constraint for a resource
+ * allocation.
+ */
+@Public
+@Unstable
+public class PlacementConstraint {
+
+  /**
+   * The constraint expression tree.
+   */
+  private AbstractConstraint constraintExpr;
+
+  public PlacementConstraint(AbstractConstraint constraintExpr) {
+this.constraintExpr = constraintExpr;
+  }
+
+  /**
+   * Get the constraint expression of the placement constraint.
+   *
+   * @return the constraint expression
+   */
+  public AbstractConstraint getConstraintExpr() {
+return constraintExpr;
+  }
+
+  /**
+   * Interface used to enable the elements of the constraint tree to be 
visited.
+   */
+  @Private
+  public interface Visitable {
+/**
+ * Visitor pattern.
+ *
+ * @param visitor visitor to be used
+ * @param  defines the type that the visitor will use and the return 
type
+ *  of the accept.
+ * @return the result of visiting a given object.
+ */
+ T accept(Visitor visitor);
+
+  }
+
+  /**
+   * Visitor API for a constraint tree.
+   *
+   * @param  determines the return type of the visit methods.
+   */
+  @Private
+  public interface Visitor {
+T visit(SingleConstraint constraint);
+
+T visit(TargetExpression target);
+
+T visit(TargetConstraint constraint);
+
+T visit(CardinalityConstraint constraint);
+
+T visit(And 

[19/50] hadoop git commit: HADOOP-14835. mvn site build throws SAX errors. Contributed by Andrew Wang and Sean Mackrory.

2017-09-21 Thread asuresh
HADOOP-14835. mvn site build throws SAX errors. Contributed by Andrew Wang and 
Sean Mackrory.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cf3540f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cf3540f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cf3540f

Branch: refs/heads/YARN-6592
Commit: 3cf3540f19b5fd1a174690db9f1b7be2977d96ba
Parents: b3d6130
Author: Andrew Wang 
Authored: Mon Sep 18 15:13:42 2017 -0700
Committer: Andrew Wang 
Committed: Mon Sep 18 15:13:42 2017 -0700

--
 BUILDING.txt   |  2 ++
 dev-support/bin/create-release |  1 +
 .../hadoop-mapreduce-client/pom.xml| 17 -
 hadoop-project-dist/pom.xml| 17 -
 hadoop-project/pom.xml |  2 ++
 hadoop-yarn-project/hadoop-yarn/pom.xml| 17 -
 6 files changed, 53 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 14deec8..47aaab4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -308,6 +308,8 @@ Create a local staging version of the website (in 
/tmp/hadoop-site)
 
   $ mvn clean site -Preleasedocs; mvn site:stage 
-DstagingDirectory=/tmp/hadoop-site
 
+Note that the site needs to be built in a second pass after other artifacts.
+
 
--
 Installing Hadoop
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index b22e90b..b98c058 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -564,6 +564,7 @@ function makearelease
 "${MVN}" "${MVN_ARGS[@]}" install \
   site site:stage \
   -DskipTests \
+  -DskipShade \
   -Pdist,src \
   "${DOCFLAGS}"
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
index aa7c7b1..274a821 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
@@ -196,6 +196,13 @@
 -unstable
 512m
   
+  
+
+  xerces
+  xercesImpl
+  ${xerces.jdiff.version}
+
+  
   
 
   
@@ -238,6 +245,14 @@
   
${project.build.directory}
   hadoop-annotations.jar
 
+
+  xerces
+  xercesImpl
+  ${xerces.version.jdiff}
+  false
+  
${project.build.directory}
+  xerces.jar
+
   
 
   
@@ -275,7 +290,7 @@

sourceFiles="${dev-support.relative.dir}/jdiff/Null.java"
maxmemory="${jdiff.javadoc.maxmemory}">
 
+
path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
   
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf3540f/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index addc2a5..8815dd4 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -152,6 +152,13 @@
 
 512m
   
+  
+
+  xerces
+  xercesImpl
+  ${xerces.jdiff.version}
+
+  
   
 
   
@@ -194,6 +201,14 @@
   
${project.build.directory}
   hadoop-annotations.jar
 
+
+  xerces
+  xercesImpl
+  ${xerces.jdiff.version}
+  false
+  
${project.build.directory}
+  xerces.jar
+
   
 
   
@@ -259,7 +274,7 @@
  

[39/50] hadoop git commit: Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in hadoop-mapreduce-examples. Contributed by Gergery Novák."

2017-09-21 Thread asuresh
Revert "MAPREDUCE-6947. Moving logging APIs over to slf4j in 
hadoop-mapreduce-examples. Contributed by Gergery Novák."

This reverts commit 2018538fdba1a95a6556187569e872fce7f9e1c3.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53047f93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53047f93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53047f93

Branch: refs/heads/YARN-6592
Commit: 53047f934e3f81237ac9f0d75dddfc44862ef2d9
Parents: a12f09b
Author: Akira Ajisaka 
Authored: Thu Sep 21 11:16:05 2017 +0900
Committer: Akira Ajisaka 
Committed: Thu Sep 21 11:16:05 2017 +0900

--
 .../java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java | 7 +++
 .../main/java/org/apache/hadoop/examples/DBCountPageView.java | 7 +++
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java | 7 ---
 .../src/main/java/org/apache/hadoop/examples/pi/DistSum.java  | 6 +++---
 .../java/org/apache/hadoop/examples/terasort/TeraGen.java | 6 +++---
 .../org/apache/hadoop/examples/terasort/TeraOutputFormat.java | 7 +++
 .../org/apache/hadoop/examples/terasort/TeraScheduler.java| 7 +++
 .../java/org/apache/hadoop/examples/terasort/TeraSort.java| 6 +++---
 .../org/apache/hadoop/examples/terasort/TestTeraSort.java | 6 +++---
 9 files changed, 28 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
index da4ec79..7e98d7d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java
@@ -29,6 +29,8 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -49,8 +51,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Charsets;
 
@@ -83,8 +83,7 @@ public class BaileyBorweinPlouffe extends Configured 
implements Tool {
   private static final String DIGIT_SIZE_PROPERTY = NAME + ".digit.size";
   private static final String DIGIT_PARTS_PROPERTY = NAME + ".digit.parts";
 
-  private static final Logger LOG =
-  LoggerFactory.getLogger(BaileyBorweinPlouffe.class);
+  private static final Log LOG = LogFactory.getLog(BaileyBorweinPlouffe.class);
 
   /** Mapper class computing digits of Pi. */
   public static class BbpMapper extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53047f93/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
index 7b73820..8dec39d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/DBCountPageView.java
@@ -29,6 +29,8 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
@@ -47,8 +49,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.hsqldb.server.Server;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * This is a demonstrative program, which uses DBInputFormat for reading
@@ -77,8 +77,7 @@ import org.slf4j.LoggerFactory;
  */
 public class 

[32/50] hadoop git commit: HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.

2017-09-21 Thread asuresh
HDFS-12437. Fix test setup in TestLeaseRecoveryStriped.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12d9d7bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12d9d7bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12d9d7bc

Branch: refs/heads/YARN-6592
Commit: 12d9d7bc509bca82b8f40301e3dc5ca764be45eb
Parents: 51edaac
Author: Andrew Wang 
Authored: Tue Sep 19 16:42:20 2017 -0700
Committer: Andrew Wang 
Committed: Tue Sep 19 16:42:20 2017 -0700

--
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   | 156 ++-
 1 file changed, 113 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12d9d7bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 2846dbf..36ac8b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.lang.builder.ToStringBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.IOUtils;
@@ -40,34 +40,41 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeoutException;
 
 public class TestLeaseRecoveryStriped {
-  public static final Log LOG = LogFactory
-  .getLog(TestLeaseRecoveryStriped.class);
+  public static final Logger LOG = LoggerFactory
+  .getLogger(TestLeaseRecoveryStriped.class);
 
   private final ErasureCodingPolicy ecPolicy =
   StripedFileTestUtil.getDefaultECPolicy();
   private final int dataBlocks = ecPolicy.getNumDataUnits();
   private final int parityBlocks = ecPolicy.getNumParityUnits();
   private final int cellSize = ecPolicy.getCellSize();
-  private final int stripSize = dataBlocks * cellSize;
-  private final int stripesPerBlock = 15;
+  private final int stripeSize = dataBlocks * cellSize;
+  private final int stripesPerBlock = 4;
   private final int blockSize = cellSize * stripesPerBlock;
   private final int blockGroupSize = blockSize * dataBlocks;
   private static final int bytesPerChecksum = 512;
 
   static {
 GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
+GenericTestUtils.setLogLevel(DFSStripedOutputStream.LOG, Level.DEBUG);
+GenericTestUtils.setLogLevel(BlockRecoveryWorker.LOG, Level.DEBUG);
+GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.DEBUG);
   }
 
   static private final String fakeUsername = "fakeUser1";
@@ -83,7 +90,7 @@ public class TestLeaseRecoveryStriped {
   public void setup() throws IOException {
 conf = new HdfsConfiguration();
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L);
+conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 60000L);
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
 false);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -104,78 +111,118 @@ public class TestLeaseRecoveryStriped {
 }
   }
 
-  private int[][][] getBlockLengthsSuite() {
+  private static class BlockLengths {
+private final int[] blockLengths;
+private final long safeLength;
+
+BlockLengths(ErasureCodingPolicy 

[45/50] hadoop git commit: HDFS-12496. Make QuorumJournalManager timeout properties configurable. Contributed by Ajay Kumar.

2017-09-21 Thread asuresh
HDFS-12496. Make QuorumJournalManager timeout properties configurable. 
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9e423fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9e423fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9e423fa

Branch: refs/heads/YARN-6592
Commit: b9e423fa8d30ea89244f6ec018a8064cc87d94a9
Parents: b9db0af
Author: Arpit Agarwal 
Authored: Thu Sep 21 08:44:43 2017 -0700
Committer: Arpit Agarwal 
Committed: Thu Sep 21 08:44:43 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../qjournal/client/QuorumJournalManager.java   | 39 +---
 .../src/main/resources/hdfs-default.xml | 11 ++
 3 files changed, 33 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e423fa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 91f3bb9..b4842f9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -725,6 +725,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.edit.log.transfer.bandwidthPerSec";
   public static final long DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT = 0; //no 
throttling
 
+  public static final String DFS_QJM_OPERATIONS_TIMEOUT =
+  "dfs.qjm.operations.timeout";
+  public static final long DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT = 60000;
+
   // Datanode File IO Stats
   public static final String DFS_DATANODE_ENABLE_FILEIO_FAULT_INJECTION_KEY =
   "dfs.datanode.enable.fileio.fault.injection";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9e423fa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index 97c0050..f66e2c0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.PriorityQueue;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
@@ -76,18 +77,10 @@ public class QuorumJournalManager implements JournalManager 
{
   private final int newEpochTimeoutMs;
   private final int writeTxnsTimeoutMs;
 
-  // Since these don't occur during normal operation, we can
-  // use rather lengthy timeouts, and don't need to make them
-  // configurable.
-  private static final int FORMAT_TIMEOUT_MS= 60000;
-  private static final int HASDATA_TIMEOUT_MS   = 60000;
-  private static final int CAN_ROLL_BACK_TIMEOUT_MS = 60000;
-  private static final int FINALIZE_TIMEOUT_MS  = 60000;
-  private static final int PRE_UPGRADE_TIMEOUT_MS   = 60000;
-  private static final int ROLL_BACK_TIMEOUT_MS = 60000;
-  private static final int DISCARD_SEGMENTS_TIMEOUT_MS  = 60000;
-  private static final int UPGRADE_TIMEOUT_MS   = 60000;
-  private static final int GET_JOURNAL_CTIME_TIMEOUT_MS = 60000;
+  // This timeout is used for calls that don't occur during normal operation
+  // e.g. format, upgrade operations and a few others. So we can use rather
+  // lengthy timeouts by default.
+  private final int timeoutMs;
   
   private final Configuration conf;
   private final URI uri;
@@ -141,6 +134,10 @@ public class QuorumJournalManager implements 
JournalManager {
 this.writeTxnsTimeoutMs = conf.getInt(
 DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
 DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT);
+this.timeoutMs = (int) conf.getTimeDuration(DFSConfigKeys
+.DFS_QJM_OPERATIONS_TIMEOUT,
+DFSConfigKeys.DFS_QJM_OPERATIONS_TIMEOUT_DEFAULT, TimeUnit
+.MILLISECONDS);
   }
   
   protected List createLoggers(
@@ -201,7 +198,7 @@ public class QuorumJournalManager implements JournalManager 
{
   public void 

[18/50] hadoop git commit: MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe

2017-09-21 Thread asuresh
MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d61304
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d61304
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d61304

Branch: refs/heads/YARN-6592
Commit: b3d61304f2fa4a99526f7a60ccaac9f262083079
Parents: 1ee2527
Author: Jason Lowe 
Authored: Mon Sep 18 17:04:43 2017 -0500
Committer: Jason Lowe 
Committed: Mon Sep 18 17:04:43 2017 -0500

--
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3d61304/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 863da7e..06a3e42 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,13 +992,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
 
-  // this audit log is disabled by default,
-  // to turn it on please enable this audit log
-  // on log4j.properties by uncommenting the setting
-  if (AUDITLOG.isDebugEnabled()) {
-AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
- " reducer " + reduceQ.get(0));
-  }
   int reduceId;
   String jobId;
   try {
@@ -1183,6 +1176,17 @@ public class ShuffleHandler extends AuxiliaryService {
 
   // Now set the response headers.
   setResponseHeaders(response, keepAliveParam, contentLength);
+
+  // this audit log is disabled by default,
+  // to turn it on please enable this audit log
+  // on log4j.properties by uncommenting the setting
+  if (AUDITLOG.isDebugEnabled()) {
+StringBuilder sb = new StringBuilder("shuffle for ").append(jobId);
+sb.append(" mappers: ").append(mapIds);
+sb.append(" reducer ").append(reduce);
+sb.append(" length ").append(contentLength);
+AUDITLOG.debug(sb.toString());
+  }
 }
 
 protected void setResponseHeaders(HttpResponse response,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[37/50] hadoop git commit: HDFS-11035. Better documentation for maintenance mode and upgrade domain.

2017-09-21 Thread asuresh
HDFS-11035. Better documentation for maintenance mode and upgrade domain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce943eb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce943eb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce943eb1

Branch: refs/heads/YARN-6592
Commit: ce943eb17a4218d8ac1f5293c6726122371d8442
Parents: 230b85d
Author: Ming Ma 
Authored: Wed Sep 20 09:36:33 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:36:33 2017 -0700

--
 .../src/site/markdown/HdfsDataNodeAdminGuide.md | 165 ++
 .../src/site/markdown/HdfsUpgradeDomain.md  | 167 +++
 hadoop-project/src/site/site.xml|   4 +-
 3 files changed, 335 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce943eb1/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
new file mode 100644
index 000..d6f288e
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDataNodeAdminGuide.md
@@ -0,0 +1,165 @@
+
+
+HDFS DataNode Admin Guide
+=
+
+
+
+Overview
+
+
+The Hadoop Distributed File System (HDFS) namenode maintains states of all 
datanodes.
+There are two types of states. The first type describes the liveness of a 
datanode indicating if
+the node is live, dead or stale. The second type describes the admin state 
indicating if the node
+is in service, decommissioned or under maintenance.
+
+When an administrator decommissions a datanode, the datanode will first be 
transitioned into
+`DECOMMISSION_INPROGRESS` state. After all blocks belonging to that datanode 
have been fully replicated elsewhere
+based on each block's replication factor, the datanode will be transitioned to 
`DECOMMISSIONED` state. After that,
+the administrator can shutdown the node to perform long-term repair and 
maintenance that could take days or weeks.
+After the machine has been repaired, the machine can be recommissioned back to 
the cluster.
+
+Sometimes administrators only need to take datanodes down for minutes/hours to 
perform short-term repair/maintenance.
+In such scenario, the HDFS block replication overhead incurred by decommission 
might not be necessary and a light-weight process is desirable.
+And that is what maintenance state is used for. When an administrator puts a 
datanode in maintenance state, the datanode will first be transitioned
+to `ENTERING_MAINTENANCE` state. As long as all blocks belonging to that 
datanode are minimally replicated elsewhere, the datanode
+will immediately be transitioned to `IN_MAINTENANCE` state. After the 
maintenance has completed, the administrator can take the datanode
+out of the maintenance state. In addition, maintenance state supports a timeout 
that allows administrators to configure the maximum duration in
+which a datanode is allowed to stay in maintenance state. After the timeout, 
the datanode will be transitioned out of maintenance state
+automatically by HDFS without human intervention.
+
+In summary, datanode admin operations include the following:
+
+* Decommission
+* Recommission
+* Putting nodes in maintenance state
+* Taking nodes out of maintenance state
+
+And datanode admin states include the following:
+
+* `NORMAL` The node is in service.
+* `DECOMMISSIONED` The node has been decommissioned.
+* `DECOMMISSION_INPROGRESS` The node is being transitioned to DECOMMISSIONED 
state.
+* `IN_MAINTENANCE` The node is in maintenance state.
+* `ENTERING_MAINTENANCE` The node is being transitioned to maintenance state.
+
+
+Host-level settings
+---
+
+To perform any of datanode admin operations, there are two steps.
+
+* Update host-level configuration files to indicate the desired admin states 
of targeted datanodes. There are two supported formats for configuration files.
+* Hostname-only configuration. Each line includes the hostname/ip address 
for a datanode. That is the default format.
+* JSON-based configuration. The configuration is in JSON format. Each 
element maps to one datanode and each datanode can have multiple properties. 
This format is required to put datanodes to maintenance states.
+
+* Run the following command to have namenode reload the host-level 
configuration files.
+`hdfs dfsadmin [-refreshNodes]`
+
+### Hostname-only configuration
+This is the default configuration used by the namenode. It only supports node 
decommission and recommission; it doesn't support admin operations related to 
maintenance state. Use 

[42/50] hadoop git commit: YARN-6968. Hardcoded absolute pathname in DockerLinuxContainerRuntime. Contributed by Eric Badger

2017-09-21 Thread asuresh
YARN-6968. Hardcoded absolute pathname in DockerLinuxContainerRuntime. 
Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10d74935
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10d74935
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10d74935

Branch: refs/heads/YARN-6592
Commit: 10d7493587643b52cee5fde87eca9ef99c422a70
Parents: 50849ec
Author: Jason Lowe 
Authored: Thu Sep 21 08:38:08 2017 -0500
Committer: Jason Lowe 
Committed: Thu Sep 21 08:38:08 2017 -0500

--
 .../linux/resources/CGroupsHandler.java|  6 ++
 .../linux/resources/CGroupsHandlerImpl.java|  5 +
 .../linux/runtime/DockerLinuxContainerRuntime.java | 13 -
 .../linux/runtime/TestDockerContainerRuntime.java  | 13 ++---
 4 files changed, 29 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10d74935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
index 82bd366..5f4d3e4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandler.java
@@ -173,4 +173,10 @@ public interface CGroupsHandler {
*/
   String getCGroupParam(CGroupController controller, String cGroupId,
   String param) throws ResourceHandlerException;
+
+  /**
+   * Returns CGroup Mount Path.
+   * @return parameter value as read from the parameter file
+   */
+  String getCGroupMountPath();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10d74935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index f37dfd3..1c6385d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -603,4 +603,9 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   "Unable to read from " + cGroupParamPath);
 }
   }
+
+  @Override
+  public String getCGroupMountPath() {
+return cGroupMountPath;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10d74935/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 8217564..1ccd365 100644
--- 

[03/50] hadoop git commit: YARN-7174. Add retry logic in LogsCLI when fetch running application logs. Contributed by Xuan Gong.

2017-09-21 Thread asuresh
YARN-7174. Add retry logic in LogsCLI when fetch running application logs. 
Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a84c24b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a84c24b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a84c24b

Branch: refs/heads/YARN-6592
Commit: 1a84c24b0cf6674fa755403971fa57d8e412b320
Parents: 90894c7
Author: Junping Du 
Authored: Fri Sep 15 15:33:24 2017 -0700
Committer: Junping Du 
Committed: Fri Sep 15 15:33:24 2017 -0700

--
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  | 175 +++-
 .../hadoop/yarn/client/cli/TestLogsCLI.java | 205 +--
 2 files changed, 309 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a84c24b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
index 1a3db26..9a8ba4a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
@@ -22,6 +22,9 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.PrintStream;
+import java.net.ConnectException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -75,9 +78,11 @@ import org.codehaus.jettison.json.JSONObject;
 import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientRequest;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.UniformInterfaceException;
 import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.filter.ClientFilter;
 
 @Public
 @Evolving
@@ -98,14 +103,27 @@ public class LogsCLI extends Configured implements Tool {
   = "show_container_log_info";
   private static final String OUT_OPTION = "out";
   private static final String SIZE_OPTION = "size";
+  private static final String CLIENT_MAX_RETRY_OPTION = "client_max_retries";
+  private static final String CLIENT_RETRY_INTERVAL_OPTION
+  = "client_retry_interval_ms";
   public static final String HELP_CMD = "help";
+
   private PrintStream outStream = System.out;
   private YarnClient yarnClient = null;
+  private Client webServiceClient = null;
+
+  private static final int DEFAULT_MAX_RETRIES = 30;
+  private static final long DEFAULT_RETRY_INTERVAL = 1000;
+
+  @Private
+  @VisibleForTesting
+  ClientConnectionRetry connectionRetry;
 
   @Override
   public int run(String[] args) throws Exception {
 try {
   yarnClient = createYarnClient();
+  webServiceClient = Client.create();
   return runCommand(args);
 } finally {
   if (yarnClient != null) {
@@ -140,6 +158,8 @@ public class LogsCLI extends Configured implements Tool {
 List amContainersList = new ArrayList();
 String localDir = null;
 long bytes = Long.MAX_VALUE;
+int maxRetries = DEFAULT_MAX_RETRIES;
+long retryInterval = DEFAULT_RETRY_INTERVAL;
 try {
   CommandLine commandLine = parser.parse(opts, args, false);
   appIdStr = commandLine.getOptionValue(APPLICATION_ID_OPTION);
@@ -171,6 +191,14 @@ public class LogsCLI extends Configured implements Tool {
   if (commandLine.hasOption(SIZE_OPTION)) {
 bytes = Long.parseLong(commandLine.getOptionValue(SIZE_OPTION));
   }
+  if (commandLine.hasOption(CLIENT_MAX_RETRY_OPTION)) {
+maxRetries = Integer.parseInt(commandLine.getOptionValue(
+CLIENT_MAX_RETRY_OPTION));
+  }
+  if (commandLine.hasOption(CLIENT_RETRY_INTERVAL_OPTION)) {
+retryInterval = Long.parseLong(commandLine.getOptionValue(
+CLIENT_RETRY_INTERVAL_OPTION));
+  }
 } catch (ParseException e) {
   System.err.println("options parsing failed: " + e.getMessage());
   printHelpMessage(printOpts);
@@ -232,6 +260,11 @@ public class LogsCLI extends Configured implements Tool {
   }
 }
 
+// Set up Retry WebService Client
+connectionRetry = new ClientConnectionRetry(maxRetries, retryInterval);
+ClientJerseyRetryFilter retryFilter = new 

[33/50] hadoop git commit: YARN-6499. Remove the doc about Schedulable#redistributeShare(). (Contributed by Chetna Chaudhari via Yufei Gu)

2017-09-21 Thread asuresh
YARN-6499. Remove the doc about Schedulable#redistributeShare(). (Contributed 
by Chetna Chaudhari via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9019e1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9019e1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9019e1f

Branch: refs/heads/YARN-6592
Commit: a9019e1fb753f15c1927e3f9355996fd6544c14f
Parents: 647b752
Author: Yufei Gu 
Authored: Tue Sep 19 18:27:37 2017 -0700
Committer: Yufei Gu 
Committed: Tue Sep 19 18:28:31 2017 -0700

--
 .../yarn/server/resourcemanager/scheduler/fair/Schedulable.java  | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9019e1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
index 4d6af98..bd1ff7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/Schedulable.java
@@ -41,10 +41,6 @@ import org.apache.hadoop.yarn.api.records.Resource;
  * - updateDemand() is called periodically to compute the demand of the various
  *   jobs and queues, which may be expensive (e.g. jobs must iterate through 
all
  *   their tasks to count failed tasks, tasks that can be speculated, etc).
- * - redistributeShare() is called after demands are updated and a 
Schedulable's
- *   fair share has been set by its parent to let it distribute its share among
- *   the other Schedulables within it (e.g. for queues that want to perform 
fair
- *   sharing among their jobs).
  */
 @Private
 @Unstable


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] hadoop git commit: YARN-6977. Node information is not provided for non am containers in RM logs. (Suma Shivaprasad via wangda)

2017-09-21 Thread asuresh
YARN-6977. Node information is not provided for non am containers in RM logs. 
(Suma Shivaprasad via wangda)

Change-Id: I0c44d09a560446dee2ba68c2b9ae69fce0ec1d3e
(cherry picked from commit 8a42e922fad613f3cf1cc6cb0f3fa72546a9cc56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/958e8c0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/958e8c0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/958e8c0e

Branch: refs/heads/YARN-6592
Commit: 958e8c0e257216c82f68fee726e5280a919da94a
Parents: ef8cd5d
Author: Wangda Tan 
Authored: Fri Sep 15 21:24:11 2017 -0700
Committer: Wangda Tan 
Committed: Fri Sep 15 21:29:31 2017 -0700

--
 .../resourcemanager/scheduler/SchedulerNode.java   |  8 
 .../scheduler/common/fica/FiCaSchedulerNode.java   | 13 +
 .../scheduler/fair/FSSchedulerNode.java| 10 ++
 3 files changed, 23 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/958e8c0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 272537c..90fa3e4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -170,14 +170,6 @@ public abstract class SchedulerNode {
 
 launchedContainers.put(container.getId(),
 new ContainerInfo(rmContainer, launchedOnNode));
-
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Assigned container " + container.getId() + " of capacity "
-  + container.getResource() + " on host " + rmNode.getNodeAddress()
-  + ", which has " + numContainers + " containers, "
-  + getAllocatedResource() + " used and " + 
getUnallocatedResource()
-  + " available after allocation");
-}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/958e8c0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
index c26a11b..729 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java
@@ -160,4 +160,17 @@ public class FiCaSchedulerNode extends SchedulerNode {
   public synchronized Map getKillableContainers() {
 return Collections.unmodifiableMap(killableContainers);
   }
+
+  protected synchronized void allocateContainer(RMContainer rmContainer,
+  boolean launchedOnNode) {
+super.allocateContainer(rmContainer, launchedOnNode);
+
+final Container container = rmContainer.getContainer();
+LOG.info("Assigned container " + container.getId() + " of capacity "
+  + container.getResource() + " on host " + 
getRMNode().getNodeAddress()
+  + ", which has " + getNumContainers() + " containers, "
+  + getAllocatedResource() + " used and " + getUnallocatedResource()
+  + " available after allocation");
+  }
+
 }


[01/50] hadoop git commit: HADOOP-14853. hadoop-mapreduce-client-app is not a client module (haibochen via rkanter) [Forced Update!]

2017-09-21 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/YARN-6592 7d5bd3eb0 -> a0e16d41d (forced update)


HADOOP-14853. hadoop-mapreduce-client-app is not a client module (haibochen via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9b607da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9b607da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9b607da

Branch: refs/heads/YARN-6592
Commit: b9b607daa74322a2928f7671a985a60388b9b9c2
Parents: fbe06b5
Author: Robert Kanter 
Authored: Fri Sep 15 13:43:39 2017 -0700
Committer: Robert Kanter 
Committed: Fri Sep 15 13:53:11 2017 -0700

--
 hadoop-client-modules/hadoop-client/pom.xml | 49 
 1 file changed, 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9b607da/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index b802416..bed3f5c 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -131,55 +131,6 @@
 
 
   org.apache.hadoop
-  hadoop-mapreduce-client-app
-  compile
-  
-
-  javax.servlet
-  javax.servlet-api
-
-
-  org.apache.hadoop
-  hadoop-yarn-server-nodemanager
-
-
-  org.apache.hadoop
-  hadoop-yarn-server-web-proxy
-
-
-  org.apache.hadoop
-  hadoop-annotations
-
-
-  com.google.inject.extensions
-  guice-servlet
-
-
-  junit
-  junit
-
-
-  org.apache.avro
-  avro
-
-
-  io.netty
-  netty
-
-
-
-  org.slf4j
-  slf4j-log4j12
-
-
-  org.apache.zookeeper
-  zookeeper
-
-  
-
-
-
-  org.apache.hadoop
   hadoop-yarn-api
   compile
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] hadoop git commit: YARN-7203. Add container ExecutionType into ContainerReport. (Botong Huang via asuresh)

2017-09-21 Thread asuresh
YARN-7203. Add container ExecutionType into ContainerReport. (Botong Huang via 
asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56ef5279
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56ef5279
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56ef5279

Branch: refs/heads/YARN-6592
Commit: 56ef5279c1db93d03b2f1e04badbfe804f548918
Parents: 3cf3540
Author: Arun Suresh 
Authored: Mon Sep 18 15:49:31 2017 -0700
Committer: Arun Suresh 
Committed: Mon Sep 18 15:49:31 2017 -0700

--
 .../yarn/api/records/ContainerReport.java   | 26 
 .../src/main/proto/yarn_protos.proto|  1 +
 .../yarn/client/api/impl/TestYarnClient.java|  1 +
 .../records/impl/pb/ContainerReportPBImpl.java  | 20 +++
 .../rmcontainer/RMContainerImpl.java|  2 +-
 5 files changed, 49 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
index 11d7bca..31d2812 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
@@ -52,6 +52,18 @@ public abstract class ContainerReport {
   long creationTime, long finishTime, String diagnosticInfo, String logUrl,
   int containerExitStatus, ContainerState containerState,
   String nodeHttpAddress) {
+return newInstance(containerId, allocatedResource, assignedNode, priority,
+creationTime, finishTime, diagnosticInfo, logUrl, containerExitStatus,
+containerState, nodeHttpAddress, ExecutionType.GUARANTEED);
+  }
+
+  @Private
+  @Unstable
+  public static ContainerReport newInstance(ContainerId containerId,
+  Resource allocatedResource, NodeId assignedNode, Priority priority,
+  long creationTime, long finishTime, String diagnosticInfo, String logUrl,
+  int containerExitStatus, ContainerState containerState,
+  String nodeHttpAddress, ExecutionType executionType) {
 ContainerReport report = Records.newRecord(ContainerReport.class);
 report.setContainerId(containerId);
 report.setAllocatedResource(allocatedResource);
@@ -64,6 +76,7 @@ public abstract class ContainerReport {
 report.setContainerExitStatus(containerExitStatus);
 report.setContainerState(containerState);
 report.setNodeHttpAddress(nodeHttpAddress);
+report.setExecutionType(executionType);
 return report;
   }
 
@@ -209,4 +222,17 @@ public abstract class ContainerReport {
   @Private
   @Unstable
   public abstract void setNodeHttpAddress(String nodeHttpAddress);
+
+  /**
+   * Get the execution type of the container.
+   *
+   * @return the execution type of the container
+   */
+  @Public
+  @Unstable
+  public abstract ExecutionType getExecutionType();
+
+  @Private
+  @Unstable
+  public abstract void setExecutionType(ExecutionType executionType);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 066441c..fb340d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -137,6 +137,7 @@ message ContainerReportProto {
   optional int32 container_exit_status = 9;
   optional ContainerStateProto container_state = 10;
   optional string node_http_address = 11;
+  optional ExecutionTypeProto executionType = 12 [default = GUARANTEED];
 }
 
 enum YarnApplicationStateProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ef5279/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
 

[47/50] hadoop git commit: YARN-7045. Remove FSLeafQueue#addAppSchedulable. (Contributed by Sen Zhao via Yufei Gu)

2017-09-21 Thread asuresh
YARN-7045. Remove FSLeafQueue#addAppSchedulable. (Contributed by Sen Zhao via 
Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a92ef030
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a92ef030
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a92ef030

Branch: refs/heads/YARN-6592
Commit: a92ef030a2707182e90acee644e47c8ef7e1fd8d
Parents: 263e2c6
Author: Yufei Gu 
Authored: Thu Sep 21 12:31:07 2017 -0700
Committer: Yufei Gu 
Committed: Thu Sep 21 12:31:07 2017 -0700

--
 .../resourcemanager/scheduler/fair/FSLeafQueue.java   | 10 --
 .../resourcemanager/scheduler/fair/TestFSLeafQueue.java   |  6 +++---
 .../resourcemanager/scheduler/fair/TestFairScheduler.java |  6 +++---
 3 files changed, 6 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92ef030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 1dcfffc..89ad1e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -94,16 +94,6 @@ public class FSLeafQueue extends FSQueue {
 }
   }
   
-  // for testing
-  void addAppSchedulable(FSAppAttempt appSched) {
-writeLock.lock();
-try {
-  runnableApps.add(appSched);
-} finally {
-  writeLock.unlock();
-}
-  }
-  
   /**
* Removes the given app from this queue.
* @return whether or not the app was runnable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92ef030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
index 2aed9bf..b6b3f7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java
@@ -89,8 +89,8 @@ public class TestFSLeafQueue extends FairSchedulerTestBase {
 FSAppAttempt app = mock(FSAppAttempt.class);
 Mockito.when(app.getDemand()).thenReturn(maxResource);
 
-schedulable.addAppSchedulable(app);
-schedulable.addAppSchedulable(app);
+schedulable.addApp(app, true);
+schedulable.addApp(app, true);
 
 schedulable.updateDemand();
 
@@ -165,7 +165,7 @@ public class TestFSLeafQueue extends FairSchedulerTestBase {
   @Override
   public void run() {
 for (int i=0; i < 500; i++) {
-  schedulable.addAppSchedulable(app);
+  schedulable.addApp(app, true);
 }
   }
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92ef030/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 0ef4d7b..d5b1fcc 100644
--- 

[36/50] hadoop git commit: HDFS-12473. Change hosts JSON file format.

2017-09-21 Thread asuresh
HDFS-12473. Change hosts JSON file format.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/230b85d5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/230b85d5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/230b85d5

Branch: refs/heads/YARN-6592
Commit: 230b85d5865b7e08fb7aaeab45295b5b966011ef
Parents: 7e58b24
Author: Ming Ma 
Authored: Wed Sep 20 09:03:59 2017 -0700
Committer: Ming Ma 
Committed: Wed Sep 20 09:03:59 2017 -0700

--
 .../hdfs/util/CombinedHostsFileReader.java  | 67 ++--
 .../hdfs/util/CombinedHostsFileWriter.java  | 23 ---
 .../CombinedHostFileManager.java|  3 +-
 .../hdfs/util/TestCombinedHostsFileReader.java  | 44 -
 .../src/test/resources/dfs.hosts.json   | 16 +++--
 .../src/test/resources/legacy.dfs.hosts.json|  7 ++
 6 files changed, 102 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/230b85d5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
index 8da5655..aa8e4c1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java
@@ -19,58 +19,85 @@
 package org.apache.hadoop.hdfs.util;
 
 import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.databind.JsonMappingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
+
 import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.io.Reader;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.Set;
-import java.util.HashSet;
+import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
- * Reader support for JSON based datanode configuration, an alternative
+ * Reader support for JSON-based datanode configuration, an alternative format
  * to the exclude/include files configuration.
- * The JSON file format is the array of elements where each element
+ * The JSON file format defines the array of elements where each element
  * in the array describes the properties of a datanode. The properties of
- * a datanode is defined in {@link DatanodeAdminProperties}. For example,
+ * a datanode is defined by {@link DatanodeAdminProperties}. For example,
  *
- * {"hostName": "host1"}
- * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
- * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * [
+ *   {"hostName": "host1"},
+ *   {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"},
+ *   {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
+ * ]
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 @InterfaceStability.Unstable
 public final class CombinedHostsFileReader {
-  private static final ObjectReader READER =
-  new ObjectMapper().readerFor(DatanodeAdminProperties.class);
-  private static final JsonFactory JSON_FACTORY = new JsonFactory();
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(CombinedHostsFileReader.class);
 
   private CombinedHostsFileReader() {
   }
 
   /**
* Deserialize a set of DatanodeAdminProperties from a json file.
-   * @param hostsFile the input json file to read from.
+   * @param hostsFile the input json file to read from
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
-  public static Set
+  public static DatanodeAdminProperties[]
   readFile(final String hostsFile) throws IOException {
-HashSet allDNs = new HashSet<>();
+DatanodeAdminProperties[] allDNs = new DatanodeAdminProperties[0];
+ObjectMapper objectMapper = new ObjectMapper();
+boolean tryOldFormat = false;
 try (Reader input =
- new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
-  Iterator iterator =
-  READER.readValues(JSON_FACTORY.createParser(input));
-  while (iterator.hasNext()) {
-DatanodeAdminProperties properties = iterator.next();
-allDNs.add(properties);
+new InputStreamReader(new 

[48/50] hadoop git commit: YARN-7034. DefaultLinuxContainerRuntime and DockerLinuxContainerRuntime send client environment variables to container-executor. Contributed by Miklos Szegedi.

2017-09-21 Thread asuresh
YARN-7034. DefaultLinuxContainerRuntime and DockerLinuxContainerRuntime send 
client environment variables to container-executor. Contributed by Miklos 
Szegedi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5e1851d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5e1851d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5e1851d

Branch: refs/heads/YARN-6592
Commit: e5e1851d803bf8d8b96fec1b5c0058014e9329d0
Parents: a92ef03
Author: Junping Du 
Authored: Thu Sep 21 14:01:16 2017 -0700
Committer: Junping Du 
Committed: Thu Sep 21 14:01:16 2017 -0700

--
 .../privileged/PrivilegedOperationExecutor.java |  2 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |  8 +--
 .../runtime/DockerLinuxContainerRuntime.java|  9 +--
 .../TestLinuxContainerExecutorWithMocks.java| 67 +++-
 .../runtime/TestDockerContainerRuntime.java |  3 +-
 5 files changed, 73 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e1851d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
index 5a3ce74..9f13a5e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperationExecutor.java
@@ -204,7 +204,7 @@ public class PrivilegedOperationExecutor {
   public String executePrivilegedOperation(PrivilegedOperation operation,
   boolean grabOutput) throws PrivilegedOperationException {
 return executePrivilegedOperation(null, operation, null, null, grabOutput,
-true);
+false);
   }
 
   //Utility functions for squashing together operations in supported ways

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e1851d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index d09e4a1..6c3ae85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -80,7 +80,6 @@ public class DefaultLinuxContainerRuntime implements 
LinuxContainerRuntime {
   @Override
   public void launchContainer(ContainerRuntimeContext ctx)
   throws ContainerExecutionException {
-Container container = ctx.getContainer();
 PrivilegedOperation launchOp = new PrivilegedOperation(
 PrivilegedOperation.OperationType.LAUNCH_CONTAINER);
 
@@ -116,8 +115,7 @@ public class DefaultLinuxContainerRuntime implements 
LinuxContainerRuntime {
 
 try {
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
-launchOp, null, container.getLaunchContext().getEnvironment(),
-false, false);
+launchOp, null, null, false, false);
 } catch (PrivilegedOperationException e) {
   LOG.warn("Launch container failed. Exception: ", e);
 
@@ -129,7 +127,6 @@ public class DefaultLinuxContainerRuntime implements 
LinuxContainerRuntime {
   @Override
   public void 

[15/50] hadoop git commit: YARN-7199. Fix TestAMRMClientContainerRequest.testOpportunisticAndGuaranteedRequests. (Botong Huang via asuresh)

2017-09-21 Thread asuresh
YARN-7199. Fix 
TestAMRMClientContainerRequest.testOpportunisticAndGuaranteedRequests. (Botong 
Huang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29dd5515
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29dd5515
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29dd5515

Branch: refs/heads/YARN-6592
Commit: 29dd55153e37471d9c177f4bd173f1d02bc96410
Parents: 0adc047
Author: Arun Suresh 
Authored: Mon Sep 18 11:26:44 2017 -0700
Committer: Arun Suresh 
Committed: Mon Sep 18 11:26:44 2017 -0700

--
 .../java/org/apache/hadoop/yarn/client/api/AMRMClient.java  | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29dd5515/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
index 815915e..e86bd12 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
@@ -118,8 +118,8 @@ public abstract class AMRMClient extends
 private String nodeLabelsExpression;
 private ExecutionTypeRequest executionTypeRequest =
 ExecutionTypeRequest.newInstance();
-private String resourceProfile;
-
+private String resourceProfile = ProfileCapability.DEFAULT_PROFILE;
+
 /**
  * Instantiates a {@link ContainerRequest} with the given constraints and
  * locality relaxation enabled.
@@ -540,6 +540,11 @@ public abstract class AMRMClient extends
 return this;
   }
 
+  public ContainerRequestBuilder resourceProfile(String resourceProfile) {
+containerRequest.resourceProfile = resourceProfile;
+return this;
+  }
+
   public ContainerRequest build() {
 containerRequest.sanityCheck();
 return containerRequest;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] hadoop git commit: Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay Kumar via Haibo Chen)" HADOOP-14879 Build failure due to failing hadoop-client-check-invariant

2017-09-21 Thread asuresh
Revert "HADOOP-14771. hadoop-client does not include hadoop-yarn-client. (Ajay 
Kumar via Haibo Chen)"
HADOOP-14879 Build failure due to failing hadoop-client-check-invariants
This reverts commit 1ee25278c891e95ba2ab142e5b78aebd752ea163.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa6e8d2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa6e8d2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa6e8d2d

Branch: refs/heads/YARN-6592
Commit: aa6e8d2dff533c3d0c86776567c860548723c21c
Parents: dba7a7d
Author: Steve Loughran 
Authored: Tue Sep 19 11:53:11 2017 +0100
Committer: Steve Loughran 
Committed: Tue Sep 19 11:53:11 2017 +0100

--
 hadoop-client-modules/hadoop-client/pom.xml | 31 
 1 file changed, 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa6e8d2d/hadoop-client-modules/hadoop-client/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client/pom.xml 
b/hadoop-client-modules/hadoop-client/pom.xml
index 6500ebf..bed3f5c 100644
--- a/hadoop-client-modules/hadoop-client/pom.xml
+++ b/hadoop-client-modules/hadoop-client/pom.xml
@@ -179,37 +179,6 @@
 
 
   org.apache.hadoop
-  hadoop-yarn-client
-  compile
-  
-
-
-  org.apache.hadoop
-  hadoop-yarn-api
-
-
-  org.apache.hadoop
-  hadoop-annotations
-
-
-  com.google.guava
-  guava
-
-
-  commons-cli
-  commons-cli
-
-
-  log4j
-  log4j
-
-  
-
-
-
-  org.apache.hadoop
   hadoop-mapreduce-client-core
   compile
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] hadoop git commit: HDFS-12480. TestNameNodeMetrics#testTransactionAndCheckpointMetrics Fails in trunk. Contributed by Hanisha Koneru

2017-09-21 Thread asuresh
HDFS-12480. TestNameNodeMetrics#testTransactionAndCheckpointMetrics Fails in 
trunk. Contributed by Hanisha Koneru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31b58406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31b58406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31b58406

Branch: refs/heads/YARN-6592
Commit: 31b58406ac369716ef1665b7d60a3409117bdf9d
Parents: 595d478
Author: Brahma Reddy Battula 
Authored: Tue Sep 19 10:37:07 2017 +0530
Committer: Brahma Reddy Battula 
Committed: Tue Sep 19 10:37:07 2017 +0530

--
 .../namenode/metrics/TestNameNodeMetrics.java | 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31b58406/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 077a5f8..db9adbe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -851,22 +851,22 @@ public class TestNameNodeMetrics {
 getMetrics(NS_METRICS));
 
 assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-assertGauge("LastWrittenTransactionId", 3L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastCheckpoint", 3L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastLogRoll", 3L, getMetrics(NS_METRICS));
+assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastLogRoll", 4L, getMetrics(NS_METRICS));
 
 fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
 
 assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastLogRoll", 4L, getMetrics(NS_METRICS));
+assertGauge("LastWrittenTransactionId", 5L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastCheckpoint", 5L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastLogRoll", 5L, getMetrics(NS_METRICS));
 
 cluster.getNameNodeRpc().rollEditLog();
 
 assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
-assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
-assertGauge("TransactionsSinceLastCheckpoint", 6L, getMetrics(NS_METRICS));
+assertGauge("LastWrittenTransactionId", 7L, getMetrics(NS_METRICS));
+assertGauge("TransactionsSinceLastCheckpoint", 7L, getMetrics(NS_METRICS));
 assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
 
 cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
@@ -876,7 +876,7 @@ public class TestNameNodeMetrics {
 long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
 getMetrics(NS_METRICS));
 assertTrue(lastCkptTime < newLastCkptTime);
-assertGauge("LastWrittenTransactionId", 8L, getMetrics(NS_METRICS));
+assertGauge("LastWrittenTransactionId", 9L, getMetrics(NS_METRICS));
 assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
 assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[14/50] hadoop git commit: MAPREDUCE-6954. Disable erasure coding for files that are uploaded to the MR staging area (pbacsko via rkanter)

2017-09-21 Thread asuresh
MAPREDUCE-6954. Disable erasure coding for files that are uploaded to the MR 
staging area (pbacsko via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0adc0471
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0adc0471
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0adc0471

Branch: refs/heads/YARN-6592
Commit: 0adc0471d0c06f66a31060f270dcb50a7b4ffafa
Parents: 5f49668
Author: Robert Kanter 
Authored: Mon Sep 18 10:40:06 2017 -0700
Committer: Robert Kanter 
Committed: Mon Sep 18 10:40:06 2017 -0700

--
 .../hadoop-mapreduce-client-core/pom.xml|  4 ++
 .../hadoop/mapreduce/JobResourceUploader.java   | 17 
 .../apache/hadoop/mapreduce/MRJobConfig.java|  5 +++
 .../src/main/resources/mapred-default.xml   |  9 
 .../mapreduce/TestJobResourceUploader.java  | 46 
 5 files changed, 81 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index c34f7bd..ce5fdc8 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -44,6 +44,10 @@
 
 
   org.apache.hadoop
+  hadoop-hdfs-client
+
+
+  org.apache.hadoop
   hadoop-hdfs
   test
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index f1cad57..d9bf988 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -36,6 +36,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 
@@ -94,6 +96,11 @@ class JobResourceUploader {
 new FsPermission(JobSubmissionFiles.JOB_DIR_PERMISSION);
 mkdirs(jtFs, submitJobDir, mapredSysPerms);
 
+if (!conf.getBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
+MRJobConfig.DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED)) {
+  disableErasureCodingForPath(jtFs, submitJobDir);
+}
+
 Collection files = conf.getStringCollection("tmpfiles");
 Collection libjars = conf.getStringCollection("tmpjars");
 Collection archives = conf.getStringCollection("tmparchives");
@@ -575,4 +582,14 @@ class JobResourceUploader {
 }
 return finalPath;
   }
+
+  private void disableErasureCodingForPath(FileSystem fs, Path path)
+  throws IOException {
+if (jtFs instanceof DistributedFileSystem) {
+  LOG.info("Disabling Erasure Coding for path: " + path);
+  DistributedFileSystem dfs = (DistributedFileSystem) jtFs;
+  dfs.setErasureCodingPolicy(path,
+  SystemErasureCodingPolicies.getReplicationPolicy().getName());
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0adc0471/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 2023ba3..86abb42 100644
--- 

[31/50] hadoop git commit: HDFS-12445. Correct spellings of choosen to chosen. Contributed by hu xiaodong.

2017-09-21 Thread asuresh
HDFS-12445. Correct spellings of choosen to chosen. Contributed by hu xiaodong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51edaacd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51edaacd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51edaacd

Branch: refs/heads/YARN-6592
Commit: 51edaacd09d86419f99ca96545a1393db1f43f73
Parents: 59830ca
Author: Andrew Wang 
Authored: Tue Sep 19 13:48:23 2017 -0700
Committer: Andrew Wang 
Committed: Tue Sep 19 13:48:23 2017 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java  | 4 ++--
 .../org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java | 2 +-
 .../java/org/apache/hadoop/examples/dancing/DancingLinks.java| 2 +-
 .../org/apache/hadoop/examples/dancing/DistributedPentomino.java | 4 ++--
 .../main/java/org/apache/hadoop/examples/dancing/Pentomino.java  | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f33ec63..0545bb2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3551,8 +3551,8 @@ public class BlockManager implements BlockStatsMXBean {
 List replicasToDelete = replicator
 .chooseReplicasToDelete(nonExcess, nonExcess, replication, excessTypes,
 addedNode, delNodeHint);
-for (DatanodeStorageInfo choosenReplica : replicasToDelete) {
-  processChosenExcessRedundancy(nonExcess, choosenReplica, storedBlock);
+for (DatanodeStorageInfo chosenReplica : replicasToDelete) {
+  processChosenExcessRedundancy(nonExcess, chosenReplica, storedBlock);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index b6c1318..1860565 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -178,7 +178,7 @@ public class TestDeadDatanode {
 clientNode, new HashSet<>(), 256 * 1024 * 1024L, null, (byte) 7,
 BlockType.CONTIGUOUS, null, null);
 for (DatanodeStorageInfo datanodeStorageInfo : results) {
-  assertFalse("Dead node should not be choosen", datanodeStorageInfo
+  assertFalse("Dead node should not be chosen", datanodeStorageInfo
   .getDatanodeDescriptor().equals(clientNode));
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
index 537b4d4..eef4461 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DancingLinks.java
@@ -368,7 +368,7 @@ public class DancingLinks {
 
   /**
* Make one move from a prefix
-   * @param goalRow the row that should be choosen
+   * @param goalRow the row that should be chosen
* @return the row that was found
*/
   private Node advance(int goalRow) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51edaacd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java
--
diff --git 

[27/50] hadoop git commit: MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. Contributed by Jason Lowe

2017-09-21 Thread asuresh
MAPREDUCE-6958. Shuffle audit logger should log size of shuffle transfer. 
Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a20debd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a20debd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a20debd

Branch: refs/heads/YARN-6592
Commit: 3a20debddeac69596ceb5b36f8413529ea8570e6
Parents: ea845ba
Author: Jason Lowe 
Authored: Tue Sep 19 09:13:17 2017 -0500
Committer: Jason Lowe 
Committed: Tue Sep 19 09:13:17 2017 -0500

--
 .../org/apache/hadoop/mapred/ShuffleHandler.java  | 18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a20debd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index 863da7e..b7f2c6d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -992,13 +992,6 @@ public class ShuffleHandler extends AuxiliaryService {
 return;
   }
 
-  // this audit log is disabled by default,
-  // to turn it on please enable this audit log
-  // on log4j.properties by uncommenting the setting
-  if (AUDITLOG.isDebugEnabled()) {
-AUDITLOG.debug("shuffle for " + jobQ.get(0) + " mappers: " + mapIds +
- " reducer " + reduceQ.get(0));
-  }
   int reduceId;
   String jobId;
   try {
@@ -1183,6 +1176,17 @@ public class ShuffleHandler extends AuxiliaryService {
 
   // Now set the response headers.
   setResponseHeaders(response, keepAliveParam, contentLength);
+
+  // this audit log is disabled by default,
+  // to turn it on please enable this audit log
+  // on log4j.properties by uncommenting the setting
+  if (AUDITLOG.isDebugEnabled()) {
+StringBuilder sb = new StringBuilder("shuffle for ");
+sb.append(jobId).append(" reducer ").append(reduce);
+sb.append(" length ").append(contentLength);
+sb.append(" mappers: ").append(mapIds);
+AUDITLOG.debug(sb.toString());
+  }
 }
 
 protected void setResponseHeaders(HttpResponse response,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[09/50] hadoop git commit: YARN-7172. ResourceCalculator.fitsIn() should not take a cluster resource parameter. (Sen Zhao via wangda)

2017-09-21 Thread asuresh
YARN-7172. ResourceCalculator.fitsIn() should not take a cluster resource 
parameter. (Sen Zhao via wangda)

Change-Id: Icc3670c9381ce7591ca69ec12da5aa52d3612d34


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e81596d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e81596d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e81596d0

Branch: refs/heads/YARN-6592
Commit: e81596d06d226f1cfa44b2390ce3095ed4dee621
Parents: 8d7cc22
Author: Wangda Tan 
Authored: Sun Sep 17 21:20:43 2017 -0700
Committer: Wangda Tan 
Committed: Sun Sep 17 21:20:43 2017 -0700

--
 .../resource/DefaultResourceCalculator.java |  3 +-
 .../resource/DominantResourceCalculator.java|  2 +-
 .../yarn/util/resource/ResourceCalculator.java  |  3 +-
 .../hadoop/yarn/util/resource/Resources.java|  4 +--
 .../util/resource/TestResourceCalculator.java   | 24 +++---
 .../server/resourcemanager/RMServerUtils.java   |  3 +-
 .../CapacitySchedulerPreemptionUtils.java   |  4 +--
 ...QueuePriorityContainerCandidateSelector.java |  5 ++-
 .../ReservedContainerCandidatesSelector.java| 34 +---
 .../scheduler/capacity/AbstractCSQueue.java |  2 +-
 .../allocator/RegularContainerAllocator.java|  8 ++---
 .../scheduler/common/fica/FiCaSchedulerApp.java | 21 +---
 .../scheduler/capacity/TestReservations.java| 20 +---
 13 files changed, 55 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index bdf60bd..7f155e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -123,8 +123,7 @@ public class DefaultResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster,
-  Resource smaller, Resource bigger) {
+  public boolean fitsIn(Resource smaller, Resource bigger) {
 return smaller.getMemorySize() <= bigger.getMemorySize();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index d64f03e..ca828a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -538,7 +538,7 @@ public class DominantResourceCalculator extends 
ResourceCalculator {
   }
 
   @Override
-  public boolean fitsIn(Resource cluster, Resource smaller, Resource bigger) {
+  public boolean fitsIn(Resource smaller, Resource bigger) {
 int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
 for (int i = 0; i < maxLength; i++) {
   ResourceInformation sResourceInformation = smaller

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e81596d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 398dac5..d59560f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -225,8 +225,7 @@ 

[24/50] hadoop git commit: HDFS-12479. Some misuses of lock in DFSStripedOutputStream. Contributed by Huafeng Wang

2017-09-21 Thread asuresh
HDFS-12479. Some misuses of lock in DFSStripedOutputStream. Contributed by 
Huafeng Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dba7a7dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dba7a7dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dba7a7dd

Branch: refs/heads/YARN-6592
Commit: dba7a7dd9d70adfab36a78eb55059c54e553a5cb
Parents: 2018538
Author: Kai Zheng 
Authored: Tue Sep 19 17:45:41 2017 +0800
Committer: Kai Zheng 
Committed: Tue Sep 19 17:45:41 2017 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dba7a7dd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 44db3a6..66eec7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -63,6 +63,7 @@ import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
@@ -85,11 +86,10 @@ public class DFSStripedOutputStream extends DFSOutputStream
 private final List queues;
 
 MultipleBlockingQueue(int numQueue, int queueSize) {
-  List list = new ArrayList<>(numQueue);
+  queues = new ArrayList<>(numQueue);
   for (int i = 0; i < numQueue; i++) {
-list.add(new LinkedBlockingQueue(queueSize));
+queues.add(new LinkedBlockingQueue(queueSize));
   }
-  queues = Collections.synchronizedList(list);
 }
 
 void offer(int i, T object) {
@@ -156,8 +156,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
   followingBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
   endBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
   newBlocks = new MultipleBlockingQueue<>(numAllBlocks, 1);
-  updateStreamerMap = Collections.synchronizedMap(
-  new HashMap(numAllBlocks));
+  updateStreamerMap = new ConcurrentHashMap<>(numAllBlocks);
   streamerUpdateResult = new MultipleBlockingQueue<>(numAllBlocks, 1);
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[02/50] hadoop git commit: HDFS-12323. NameNode terminates after full GC thinking QJM unresponsive if full GC is much longer than timeout. Contributed by Erik Krogen.

2017-09-21 Thread asuresh
HDFS-12323. NameNode terminates after full GC thinking QJM unresponsive if full 
GC is much longer than timeout. Contributed by Erik Krogen.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90894c72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90894c72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90894c72

Branch: refs/heads/YARN-6592
Commit: 90894c7262df0243e795b675f3ac9f7b322ccd11
Parents: b9b607d
Author: Erik Krogen 
Authored: Thu Sep 14 15:53:33 2017 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Sep 15 13:56:27 2017 -0700

--
 .../hadoop/hdfs/qjournal/client/QuorumCall.java | 65 
 .../hdfs/qjournal/client/TestQuorumCall.java| 31 +-
 2 files changed, 82 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90894c72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
index dc32318..dee74e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java
@@ -24,7 +24,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StopWatch;
-import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Timer;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -35,6 +35,7 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.protobuf.Message;
 import com.google.protobuf.TextFormat;
 
+
 /**
  * Represents a set of calls for which a quorum of results is needed.
  * @param  a key used to identify each of the outgoing calls
@@ -60,11 +61,12 @@ class QuorumCall {
* fraction of the configured timeout for any call.
*/
   private static final float WAIT_PROGRESS_WARN_THRESHOLD = 0.7f;
-  private final StopWatch quorumStopWatch = new StopWatch();
+  private final StopWatch quorumStopWatch;
+  private final Timer timer;
   
   static  QuorumCall create(
-  Map calls) {
-final QuorumCall qr = new QuorumCall();
+  Map calls, Timer timer) {
+final QuorumCall qr = new QuorumCall(timer);
 for (final Entry e : 
calls.entrySet()) {
   Preconditions.checkArgument(e.getValue() != null,
   "null future for key: " + e.getKey());
@@ -82,18 +84,53 @@ class QuorumCall {
 }
 return qr;
   }
-  
+
+  static  QuorumCall create(
+  Map calls) {
+return create(calls, new Timer());
+  }
+
+  /**
+   * Not intended for outside use.
+   */
   private QuorumCall() {
+this(new Timer());
+  }
+
+  private QuorumCall(Timer timer) {
 // Only instantiated from factory method above
+this.timer = timer;
+this.quorumStopWatch = new StopWatch(timer);
   }
 
+  /**
+   * Used in conjunction with {@link #getQuorumTimeoutIncreaseMillis(long, 
int)}
+   * to check for pauses.
+   */
   private void restartQuorumStopWatch() {
 quorumStopWatch.reset().start();
   }
 
-  private boolean shouldIncreaseQuorumTimeout(long offset, int millis) {
+  /**
+   * Check for a pause (e.g. GC) since the last time
+   * {@link #restartQuorumStopWatch()} was called. If detected, return the
+   * length of the pause; else, -1.
+   * @param offset Offset the elapsed time by this amount; use if some amount
+   *   of pause was expected
+   * @param millis Total length of timeout in milliseconds
+   * @return Length of pause, if detected, else -1
+   */
+  private long getQuorumTimeoutIncreaseMillis(long offset, int millis) {
 long elapsed = quorumStopWatch.now(TimeUnit.MILLISECONDS);
-return elapsed + offset > (millis * WAIT_PROGRESS_INFO_THRESHOLD);
+long pauseTime = elapsed + offset;
+if (pauseTime > (millis * WAIT_PROGRESS_INFO_THRESHOLD)) {
+  QuorumJournalManager.LOG.info("Pause detected while waiting for " +
+  "QuorumCall response; increasing timeout threshold by pause time " +
+  "of " + pauseTime + " ms.");
+  return pauseTime;
+} else {
+  return -1;
+}
   }
 
   
@@ -119,7 +156,7 @@ class QuorumCall

[6/8] hadoop git commit: YARN-6923. Metrics for Federation Router. (Giovanni Matteo Fumarola via asuresh)

2017-09-21 Thread curino
YARN-6923. Metrics for Federation Router. (Giovanni Matteo Fumarola via asuresh)

(cherry picked from commit ae8fb13b312b30de50d65b5450b565d50d690e9e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2aacb9d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2aacb9d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2aacb9d3

Branch: refs/heads/branch-2
Commit: 2aacb9d3fbf21308daff828639be10acbcd5e5cc
Parents: ac090b3
Author: Arun Suresh 
Authored: Mon Aug 21 22:50:24 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 18:09:37 2017 -0700

--
 .../yarn/server/router/RouterMetrics.java   | 203 +++
 .../clientrm/FederationClientInterceptor.java   |  37 ++-
 .../webapp/FederationInterceptorREST.java   | 116 +++--
 .../yarn/server/router/TestRouterMetrics.java   | 248 +++
 .../webapp/TestFederationInterceptorREST.java   |  12 +-
 5 files changed, 593 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aacb9d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
new file mode 100644
index 000..42361a3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.router;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.*;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
+/**
+ * This class is for maintaining the various Router Federation Interceptor
+ * activity statistics and publishing them through the metrics interfaces.
+ */
+@InterfaceAudience.Private
+@Metrics(about = "Metrics for Router Federation Interceptor", context = "fedr")
+public final class RouterMetrics {
+
+  private static final MetricsInfo RECORD_INFO =
+  info("RouterMetrics", "Router Federation Interceptor");
+  private static AtomicBoolean isInitialized = new AtomicBoolean(false);
+
+  // Metrics for operation failed
+  @Metric("# of applications failed to be submitted")
+  private MutableGaugeInt numAppsFailedSubmitted;
+  @Metric("# of applications failed to be created")
+  private MutableGaugeInt numAppsFailedCreated;
+  @Metric("# of applications failed to be killed")
+  private MutableGaugeInt numAppsFailedKilled;
+  @Metric("# of application reports failed to be retrieved")
+  private MutableGaugeInt numAppsFailedRetrieved;
+
+  // Aggregate metrics are shared, and don't have to be looked up per call
+  @Metric("Total number of successful Submitted apps and latency(ms)")
+  private MutableRate totalSucceededAppsSubmitted;
+  @Metric("Total number of successful Killed apps and latency(ms)")
+  private MutableRate totalSucceededAppsKilled;
+  @Metric("Total number of successful Created apps and latency(ms)")
+  private MutableRate totalSucceededAppsCreated;
+  @Metric("Total number of successful Retrieved app reports and latency(ms)")
+  private MutableRate totalSucceededAppsRetrieved;
+
+  /**
+   * Provide quantile counters for all latencies.
+   */
+  private MutableQuantiles submitApplicationLatency;
+  private 

[4/8] hadoop git commit: YARN-6900. ZooKeeper based implementation of the FederationStateStore. (Íñigo Goiri via Subru).

2017-09-21 Thread curino
YARN-6900. ZooKeeper based implementation of the FederationStateStore. (Íñigo 
Goiri via Subru).

(cherry picked from commit de462da04e167a04b89ecf0f40d464cf39dc6549)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/261f769d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/261f769d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/261f769d

Branch: refs/heads/branch-2
Commit: 261f769d797b61839b40873f0df13aa58e86f3f9
Parents: 9ad067e
Author: Subru Krishnan 
Authored: Wed Aug 16 11:43:24 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 18:09:23 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   8 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +
 .../hadoop-yarn-server-common/pom.xml   |   5 +
 .../impl/ZookeeperFederationStateStore.java | 634 +++
 .../impl/TestZookeeperFederationStateStore.java |  89 +++
 .../TestFederationStateStoreFacadeRetry.java|  20 +-
 .../src/site/markdown/Federation.md |  56 +-
 7 files changed, 785 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/261f769d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index c34c076..bf18ade 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2561,6 +2561,14 @@ public class YarnConfiguration extends Configuration {
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS = "";
 
+  public static final String FEDERATION_STATESTORE_ZK_PREFIX =
+  FEDERATION_PREFIX + "zk-state-store.";
+  /** Parent znode path under which ZKRMStateStore will create znodes. */
+  public static final String FEDERATION_STATESTORE_ZK_PARENT_PATH =
+  FEDERATION_STATESTORE_ZK_PREFIX + "parent-path";
+  public static final String DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH =
+  "/federationstore";
+
   private static final String FEDERATION_STATESTORE_SQL_PREFIX =
   FEDERATION_PREFIX + "state-store.sql.";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/261f769d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index d6c619d..ad38051 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -98,6 +98,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS);
 
+// Federation StateStore ZK implementation configs to be ignored
+configurationPropsToSkipCompare.add(
+YarnConfiguration.FEDERATION_STATESTORE_ZK_PARENT_PATH);
+
 // Federation StateStore SQL implementation configs to be ignored
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_SQL_JDBC_CLASS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/261f769d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 8c754fa..f484e35 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -137,6 +137,11 @@
 
   
 
+
+  org.apache.curator
+  curator-test
+  test
+
   
 
   


[5/8] hadoop git commit: YARN-5603. Metrics for Federation StateStore. (Ellen Hui via asuresh)

2017-09-21 Thread curino
YARN-5603. Metrics for Federation StateStore. (Ellen Hui via asuresh)

(cherry picked from commit 75abc9a8e2cf1c7d2c574ede720df59421512be3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac090b38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac090b38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac090b38

Branch: refs/heads/branch-2
Commit: ac090b38ad54f78f59ec2ec0f73c6c4d7664d4cb
Parents: 261f769
Author: Arun Suresh 
Authored: Mon Aug 21 22:43:08 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 18:09:30 2017 -0700

--
 .../store/impl/SQLFederationStateStore.java |  79 
 .../FederationStateStoreClientMetrics.java  | 184 +++
 .../federation/store/metrics/package-info.java  |  17 ++
 .../TestFederationStateStoreClientMetrics.java  | 146 +++
 4 files changed, 426 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac090b38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
index 63d8e42..533f9c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
 import 
org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreInvalidInputException;
+import 
org.apache.hadoop.yarn.server.federation.store.metrics.FederationStateStoreClientMetrics;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
 import 
org.apache.hadoop.yarn.server.federation.store.records.ApplicationHomeSubCluster;
@@ -72,6 +73,8 @@ import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationMembership
 import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationPolicyStoreInputValidator;
 import 
org.apache.hadoop.yarn.server.federation.store.utils.FederationStateStoreUtils;
 import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.MonotonicClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -137,6 +140,7 @@ public class SQLFederationStateStore implements 
FederationStateStore {
   private String url;
   private int maximumPoolSize;
   private HikariDataSource dataSource = null;
+  private final Clock clock = new MonotonicClock();
 
   @Override
   public void init(Configuration conf) throws YarnException {
@@ -203,7 +207,9 @@ public class SQLFederationStateStore implements 
FederationStateStore {
   cstmt.registerOutParameter(9, java.sql.Types.INTEGER);
 
   // Execute the query
+  long startTime = clock.getTime();
   cstmt.executeUpdate();
+  long stopTime = clock.getTime();
 
   // Check the ROWCOUNT value, if it is equal to 0 it means the call
   // did not add a new subcluster into FederationStateStore
@@ -222,8 +228,11 @@ public class SQLFederationStateStore implements 
FederationStateStore {
 
   LOG.info(
   "Registered the SubCluster " + subClusterId + " into the 
StateStore");
+  FederationStateStoreClientMetrics
+  .succeededStateStoreCall(stopTime - startTime);
 
 } catch (SQLException e) {
+  FederationStateStoreClientMetrics.failedStateStoreCall();
   FederationStateStoreUtils.logAndThrowRetriableException(LOG,
   "Unable to register the SubCluster " + subClusterId
   + " into the StateStore",
@@ -260,7 +269,9 @@ public class SQLFederationStateStore implements 
FederationStateStore {
   cstmt.registerOutParameter(3, java.sql.Types.INTEGER);
 
   // Execute the query
+  long startTime = clock.getTime();
   cstmt.executeUpdate();
+  long stopTime = clock.getTime();
 

[1/8] hadoop git commit: YARN-6970. Add PoolInitializationException as retriable exception in FederationFacade. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d11be2dca -> 7cd9018b1


YARN-6970. Add PoolInitializationException as retriable exception in 
FederationFacade. (Giovanni Matteo Fumarola via Subru).

(cherry picked from commit ad2a3506626728a6be47af0db3ca60610a568734)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1ee4ad7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1ee4ad7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1ee4ad7

Branch: refs/heads/branch-2
Commit: a1ee4ad77f964e43ff8005729327d2a0fed6fa04
Parents: d11be2d
Author: Subru Krishnan 
Authored: Tue Aug 8 16:48:29 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 18:08:30 2017 -0700

--
 .../utils/FederationStateStoreFacade.java   |  2 ++
 .../TestFederationStateStoreFacadeRetry.java| 24 
 2 files changed, 26 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1ee4ad7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
index 389c769..682eb14 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/utils/FederationStateStoreFacade.java
@@ -70,6 +70,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException;
 
 /**
  *
@@ -162,6 +163,7 @@ public final class FederationStateStoreFacade {
 exceptionToPolicyMap.put(FederationStateStoreRetriableException.class,
 basePolicy);
 exceptionToPolicyMap.put(CacheLoaderException.class, basePolicy);
+exceptionToPolicyMap.put(PoolInitializationException.class, basePolicy);
 
 RetryPolicy retryPolicy = RetryPolicies.retryByException(
 RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1ee4ad7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
index 304910e..ea43268 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/utils/TestFederationStateStoreFacadeRetry.java
@@ -30,6 +30,8 @@ import 
org.apache.hadoop.yarn.server.federation.store.exception.FederationStateS
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.zaxxer.hikari.pool.HikariPool.PoolInitializationException;
+
 /**
  * Test class to validate FederationStateStoreFacade retry policy.
  */
@@ -119,4 +121,26 @@ public class TestFederationStateStoreFacadeRetry {
 policy.shouldRetry(new CacheLoaderException(""), maxRetries, 0, false);
 Assert.assertEquals(RetryAction.FAIL.action, action.action);
   }
+
+  /*
+   * Test to validate that PoolInitializationException is a retriable exception.
+   */
+  @Test
+  public void testFacadePoolInitRetriableException() throws Exception {
+// PoolInitializationException is a retriable exception
+conf = new Configuration();
+conf.setInt(YarnConfiguration.CLIENT_FAILOVER_RETRIES, maxRetries);
+RetryPolicy policy = FederationStateStoreFacade.createRetryPolicy(conf);
+RetryAction action = policy.shouldRetry(
+new PoolInitializationException(new YarnException()), 0, 0, false);
+// 

[8/8] hadoop git commit: [YARN FEDERATION BACKPORT] Fixing more Java 1.7 compilation issues

2017-09-21 Thread curino
[YARN FEDERATION BACKPORT] Fixing more Java 1.7 compilation issues


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7cd9018b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7cd9018b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7cd9018b

Branch: refs/heads/branch-2
Commit: 7cd9018b136f5f6de7bb819909d74e4cab8fb19d
Parents: 88b32ed
Author: Carlo Curino 
Authored: Thu Sep 21 18:19:36 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 18:19:36 2017 -0700

--
 .../server/router/webapp/FederationInterceptorREST.java | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7cd9018b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 3a91e35..15caf0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -622,11 +622,13 @@ public class FederationInterceptorREST extends 
AbstractRESTRequestInterceptor {
* operation.
*/
   @Override
-  public AppsInfo getApps(HttpServletRequest hsr, String stateQuery,
-  Set<String> statesQuery, String finalStatusQuery, String userQuery,
-  String queueQuery, String count, String startedBegin, String startedEnd,
-  String finishBegin, String finishEnd, Set<String> applicationTypes,
-  Set<String> applicationTags, Set<String> unselectedFields) {
+  public AppsInfo getApps(final HttpServletRequest hsr, final String stateQuery,
+  final Set<String> statesQuery, final String finalStatusQuery,
+  final String userQuery, final String queueQuery, final String count,
+  final String startedBegin, final String startedEnd,
+  final String finishBegin, final String finishEnd,
+  final Set<String> applicationTypes, final Set<String> applicationTags,
+  final Set<String> unselectedFields) {
 AppsInfo apps = new AppsInfo();
 long startTime = clock.getTime();
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[7/8] hadoop git commit: YARN-7010. Federation: routing REST invocations transparently to multiple RMs (part 2 - getApps). (Contributed by Giovanni Matteo Fumarola via curino)

2017-09-21 Thread curino
YARN-7010. Federation: routing REST invocations transparently to multiple RMs 
(part 2 - getApps). (Contributed by Giovanni Matteo Fumarola via curino)

(cherry picked from commit cc8893edc0b7960e958723c81062986c12f06ade)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88b32edb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88b32edb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88b32edb

Branch: refs/heads/branch-2
Commit: 88b32edb8fb49bc87e5e56f4cce28c8358eae398
Parents: 2aacb9d
Author: Carlo Curino 
Authored: Tue Aug 29 14:53:09 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 18:09:44 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   2 +
 .../server/uam/UnmanagedApplicationManager.java |   2 +-
 .../resourcemanager/webapp/dao/AppInfo.java | 184 +++
 .../resourcemanager/webapp/dao/AppsInfo.java|   4 +
 .../yarn/server/router/RouterMetrics.java   |  33 ++
 .../webapp/FederationInterceptorREST.java   | 118 ++-
 .../router/webapp/RouterWebServiceUtil.java | 109 ++-
 .../yarn/server/router/TestRouterMetrics.java   |  50 +++
 .../MockDefaultRequestInterceptorREST.java  |  49 ++-
 .../webapp/TestFederationInterceptorREST.java   |  17 +
 .../TestFederationInterceptorRESTRetry.java |  45 +++
 .../router/webapp/TestRouterWebServiceUtil.java | 311 +++
 13 files changed, 855 insertions(+), 78 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88b32edb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bf18ade..0f05f1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2671,6 +2671,15 @@ public class YarnConfiguration extends Configuration {
   "org.apache.hadoop.yarn.server.router.webapp."
   + "DefaultRequestInterceptorREST";
 
+  /**
+   * The interceptor class used in FederationInterceptorREST should return
+   * partial AppReports.
+   */
+  public static final String ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
+  ROUTER_WEBAPP_PREFIX + "partial-result.enabled";
+  public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED =
+  false;
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88b32edb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index ad38051..6cb92f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -158,6 +158,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 
 configurationPrefixToSkipCompare
 .add(YarnConfiguration.ROUTER_CLIENTRM_SUBMIT_RETRY);
+configurationPrefixToSkipCompare
+.add(YarnConfiguration.ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED);
 
 // Set by container-executor.cfg
 configurationPrefixToSkipCompare.add(YarnConfiguration.NM_USER_HOME_DIR);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88b32edb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
index 60a9a27..6531a75 100644

[3/8] hadoop git commit: YARN-6996. Change javax.cache library implementation from JSR107 to Apache Geronimo. (Ray Chiang via Subru).

2017-09-21 Thread curino
YARN-6996. Change javax.cache library implementation from JSR107 to Apache 
Geronimo. (Ray Chiang via Subru).

(cherry picked from commit 18f3603bce37e0e07c9075811b1179afc2c227eb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ad067ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ad067ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ad067ef

Branch: refs/heads/branch-2
Commit: 9ad067efe95b738f7d7ed886d94121d5b806be96
Parents: 8220b19
Author: Subru Krishnan 
Authored: Mon Aug 14 11:10:00 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 18:09:16 2017 -0700

--
 hadoop-project/pom.xml | 6 +++---
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml   | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ad067ef/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6b24a75..9918af5 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -88,7 +88,7 @@
 
 2.0.0-M15
 
-1.0.0
+1.0-alpha-1
 3.3.1
 2.4.12
 6.2.1.jre7
@@ -1086,8 +1086,8 @@
 1.3.0
 
 
-  javax.cache
-  cache-api
+  org.apache.geronimo.specs
+  geronimo-jcache_1.0_spec
   ${jcache.version}
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ad067ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 7722f4f..8c754fa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -110,8 +110,8 @@
   leveldbjni-all
 
 
-  javax.cache
-  cache-api
+  org.apache.geronimo.specs
+  geronimo-jcache_1.0_spec
 
 
   org.ehcache


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/8] hadoop git commit: YARN-6896. Federation: routing REST invocations transparently to multiple RMs (part 1 - basic execution). (Contributed by Giovanni Matteo Fumarola via curino)

2017-09-21 Thread curino
YARN-6896. Federation: routing REST invocations transparently to multiple RMs 
(part 1 - basic execution). (Contributed by Giovanni Matteo Fumarola via curino)

(cherry picked from commit cc59b5fb26ccf58dffcd8850fa12ec65250f127d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8220b19a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8220b19a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8220b19a

Branch: refs/heads/branch-2
Commit: 8220b19af70744b27c265d604ab3993e0c7659c5
Parents: a1ee4ad
Author: Carlo Curino 
Authored: Fri Aug 11 15:58:01 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 18:09:07 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   2 +
 .../webapp/DefaultRequestInterceptorREST.java   |  16 +-
 .../webapp/FederationInterceptorREST.java   | 750 +++
 .../webapp/BaseRouterWebServicesTest.java   |  37 +-
 .../MockDefaultRequestInterceptorREST.java  | 136 
 .../webapp/TestFederationInterceptorREST.java   | 379 ++
 .../TestFederationInterceptorRESTRetry.java | 274 +++
 .../TestableFederationInterceptorREST.java  |  54 ++
 .../src/site/markdown/Federation.md |   2 +-
 10 files changed, 1646 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8220b19a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 34374cf..c34c076 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2653,6 +2653,16 @@ public class YarnConfiguration extends Configuration {
   "org.apache.hadoop.yarn.server.router.webapp."
   + "DefaultRequestInterceptorREST";
 
+  /**
+   * The interceptor class used in FederationInterceptorREST to communicate 
with
+   * each SubCluster.
+   */
+  public static final String ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS =
+  ROUTER_WEBAPP_PREFIX + "default-interceptor-class";
+  public static final String DEFAULT_ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS =
+  "org.apache.hadoop.yarn.server.router.webapp."
+  + "DefaultRequestInterceptorREST";
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8220b19a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 56fb578..d6c619d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -83,6 +83,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.ROUTER_CLIENTRM_ADDRESS);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.ROUTER_RMADMIN_ADDRESS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS);
 
 // Federation policies configs to be ignored
 configurationPropsToSkipCompare

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8220b19a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
index aa8e3eb..abd8ca6 100644
--- 

[03/50] [abbrv] hadoop git commit: YARN-5601. Make the RM epoch base value configurable. Contributed by Subru Krishnan

2017-09-21 Thread curino
YARN-5601. Make the RM epoch base value configurable. Contributed by Subru 
Krishnan

(cherry picked from commit 9ca2aba9cc65090162b3517b194b5e655ee4a157)
(cherry picked from commit 2797507d51566ab3b8328e5fb1d0beb9fbce5bae)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aac87551
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aac87551
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aac87551

Branch: refs/heads/branch-2
Commit: aac875512503130ad8e8b4e555caef25565c110a
Parents: 8409fef
Author: Jian He 
Authored: Fri Sep 2 12:23:57 2016 +0800
Committer: Carlo Curino 
Committed: Thu Sep 21 16:23:40 2017 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml | 5 -
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java  | 3 +++
 .../apache/hadoop/yarn/conf/TestYarnConfigurationFields.java | 2 ++
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java  | 7 +++
 .../resourcemanager/recovery/FileSystemRMStateStore.java | 2 +-
 .../server/resourcemanager/recovery/LeveldbRMStateStore.java | 2 +-
 .../server/resourcemanager/recovery/MemoryRMStateStore.java  | 1 +
 .../yarn/server/resourcemanager/recovery/RMStateStore.java   | 4 
 .../yarn/server/resourcemanager/recovery/ZKRMStateStore.java | 2 +-
 .../resourcemanager/recovery/RMStateStoreTestBase.java   | 8 +---
 .../server/resourcemanager/recovery/TestFSRMStateStore.java  | 1 +
 .../resourcemanager/recovery/TestLeveldbRMStateStore.java| 1 +
 .../server/resourcemanager/recovery/TestZKRMStateStore.java  | 1 +
 13 files changed, 32 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aac87551/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 6c1d014..39305ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -276,7 +276,10 @@
   
   
 
-
+
+  
+  
+
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aac87551/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index c9db167..66bc377 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -149,6 +149,9 @@ public class YarnConfiguration extends Configuration {
 
   public static final String RM_HOSTNAME = RM_PREFIX + "hostname";
 
+  public static final String RM_EPOCH = RM_PREFIX + "epoch";
+  public static final long DEFAULT_RM_EPOCH = 0L;
+
   /** The address of the applications manager interface in the RM.*/
   public static final String RM_ADDRESS = 
 RM_PREFIX + "address";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aac87551/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 38e2668..c5a279d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -77,6 +77,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.RM_EPOCH);
 
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"


[43/50] [abbrv] hadoop git commit: YARN-6866. Minor clean-up and fixes in anticipation of YARN-2915 merge with trunk. (Botong Huang via Subru).

2017-09-21 Thread curino
YARN-6866. Minor clean-up and fixes in anticipation of YARN-2915 merge with 
trunk. (Botong Huang via Subru).

(cherry picked from commit 40453879ec860819b080ccc82454480a436c0adc)
(cherry picked from commit 86f05c33125af3b435d6793551680ffa35047aa7)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/049f7c84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/049f7c84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/049f7c84

Branch: refs/heads/branch-2
Commit: 049f7c84a1dae5576012c69043bb98d073c85368
Parents: 66980c0
Author: Subru Krishnan 
Authored: Tue Jul 25 20:22:45 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:55:43 2017 -0700

--
 hadoop-project/pom.xml |  6 +++---
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java |  4 ++--
 .../hadoop/yarn/conf/TestYarnConfigurationFields.java  |  4 
 .../router/clientrm/FederationClientInterceptor.java   | 13 +++--
 .../hadoop-yarn-site/src/site/markdown/Federation.md   |  8 
 5 files changed, 16 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/049f7c84/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index d61dc4b..6b24a75 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -89,9 +89,9 @@
 2.0.0-M15
 
 1.0.0
-3.0.3
-2.4.11
-6.1.0.jre7
+3.3.1
+2.4.12
+6.2.1.jre7
 
 
 1.7

http://git-wip-us.apache.org/repos/asf/hadoop/blob/049f7c84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 3600709..7adfdf1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2592,7 +2592,7 @@ public class YarnConfiguration extends Configuration {
   ROUTER_PREFIX + "clientrm.";
 
   public static final String ROUTER_CLIENTRM_ADDRESS =
-  ROUTER_CLIENTRM_PREFIX + ".address";
+  ROUTER_CLIENTRM_PREFIX + "address";
   public static final int DEFAULT_ROUTER_CLIENTRM_PORT = 8050;
   public static final String DEFAULT_ROUTER_CLIENTRM_ADDRESS =
   "0.0.0.0:" + DEFAULT_ROUTER_CLIENTRM_PORT;
@@ -2610,7 +2610,7 @@ public class YarnConfiguration extends Configuration {
   public static final String ROUTER_RMADMIN_PREFIX = ROUTER_PREFIX + 
"rmadmin.";
 
   public static final String ROUTER_RMADMIN_ADDRESS =
-  ROUTER_RMADMIN_PREFIX + ".address";
+  ROUTER_RMADMIN_PREFIX + "address";
   public static final int DEFAULT_ROUTER_RMADMIN_PORT = 8052;
   public static final String DEFAULT_ROUTER_RMADMIN_ADDRESS =
   "0.0.0.0:" + DEFAULT_ROUTER_RMADMIN_PORT;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/049f7c84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 910df9e..56fb578 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -79,6 +79,10 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.RM_EPOCH);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.ROUTER_CLIENTRM_ADDRESS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.ROUTER_RMADMIN_ADDRESS);
 
 // Federation policies configs to be ignored
 configurationPropsToSkipCompare


[28/50] [abbrv] hadoop git commit: YARN-5413. Create a proxy chain for ResourceManager Admin API in the Router. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
YARN-5413. Create a proxy chain for ResourceManager Admin API in the Router. 
(Giovanni Matteo Fumarola via Subru).

(cherry picked from commit 67846a5519b5905c2d925cf4c602f715b653e72c)
(cherry picked from commit 706d6d48846d06b04e82587885bb8632a16c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f00f938
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f00f938
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f00f938

Branch: refs/heads/branch-2
Commit: 7f00f938f253d87153c2b8a3938daa3b99cb6f42
Parents: dc0a2e6
Author: Subru Krishnan 
Authored: Tue May 9 19:19:27 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:47:14 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  22 +-
 .../src/main/resources/yarn-default.xml |  21 +
 .../hadoop/yarn/util/TestLRUCacheHashMap.java   |   2 +-
 .../yarn/server/MockResourceManagerFacade.java  |   7 +-
 .../hadoop/yarn/server/router/Router.java   |  10 +
 .../AbstractClientRequestInterceptor.java   |  11 +-
 .../DefaultClientRequestInterceptor.java|   2 +-
 .../router/clientrm/RouterClientRMService.java  |  16 +-
 .../AbstractRMAdminRequestInterceptor.java  |  90 
 .../DefaultRMAdminRequestInterceptor.java   | 215 ++
 .../rmadmin/RMAdminRequestInterceptor.java  |  65 +++
 .../router/rmadmin/RouterRMAdminService.java| 423 +++
 .../server/router/rmadmin/package-info.java |  20 +
 .../router/clientrm/BaseRouterClientRMTest.java |   2 +-
 .../router/rmadmin/BaseRouterRMAdminTest.java   | 346 +++
 .../rmadmin/MockRMAdminRequestInterceptor.java  |  36 ++
 .../PassThroughRMAdminRequestInterceptor.java   | 148 +++
 .../rmadmin/TestRouterRMAdminService.java   | 219 ++
 18 files changed, 1635 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f00f938/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a3b53d6..9e8a5ad 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2586,6 +2586,8 @@ public class YarnConfiguration extends Configuration {
 
   public static final String ROUTER_PREFIX = YARN_PREFIX + "router.";
 
+  public static final String ROUTER_BIND_HOST = ROUTER_PREFIX + "bind-host";
+
   public static final String ROUTER_CLIENTRM_PREFIX =
   ROUTER_PREFIX + "clientrm.";
 
@@ -2601,9 +2603,23 @@ public class YarnConfiguration extends Configuration {
   "org.apache.hadoop.yarn.server.router.clientrm."
   + "DefaultClientRequestInterceptor";
 
-  public static final String ROUTER_CLIENTRM_PIPELINE_CACHE_MAX_SIZE =
-  ROUTER_CLIENTRM_PREFIX + "cache-max-size";
-  public static final int DEFAULT_ROUTER_CLIENTRM_PIPELINE_CACHE_MAX_SIZE = 25;
+  public static final String ROUTER_PIPELINE_CACHE_MAX_SIZE =
+  ROUTER_PREFIX + "pipeline.cache-max-size";
+  public static final int DEFAULT_ROUTER_PIPELINE_CACHE_MAX_SIZE = 25;
+
+  public static final String ROUTER_RMADMIN_PREFIX = ROUTER_PREFIX + 
"rmadmin.";
+
+  public static final String ROUTER_RMADMIN_ADDRESS =
+  ROUTER_RMADMIN_PREFIX + ".address";
+  public static final int DEFAULT_ROUTER_RMADMIN_PORT = 8052;
+  public static final String DEFAULT_ROUTER_RMADMIN_ADDRESS =
+  "0.0.0.0:" + DEFAULT_ROUTER_RMADMIN_PORT;
+
+  public static final String ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE =
+  ROUTER_RMADMIN_PREFIX + "interceptor-class.pipeline";
+  public static final String DEFAULT_ROUTER_RMADMIN_INTERCEPTOR_CLASS =
+  "org.apache.hadoop.yarn.server.router.rmadmin."
+  + "DefaultRMAdminRequestInterceptor";
 
   
   // Other Configs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f00f938/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 5d53e14..998e4cb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml

[06/50] [abbrv] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2017-09-21 Thread curino
YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via 
Subru).

(cherry picked from commit 11c5336522d3504598fb94eee288d54df73418c6)
(cherry picked from commit 1dadd0b45a6a605da72eb304808edb49fc66da45)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c5ab53f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c5ab53f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c5ab53f

Branch: refs/heads/branch-2
Commit: 5c5ab53f3daddc79e47bd14829cbf0b3a4afc7ca
Parents: f792f7b
Author: Subru Krishnan 
Authored: Thu Oct 13 17:59:13 2016 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:23:42 2017 -0700

--
 .../AbstractConfigurableFederationPolicy.java   | 155 +
 .../policies/ConfigurableFederationPolicy.java  |   9 +-
 .../FederationPolicyInitializationContext.java  |  37 +-
 ...ionPolicyInitializationContextValidator.java |  28 +-
 .../policies/FederationPolicyManager.java   |  59 +-
 .../amrmproxy/AbstractAMRMProxyPolicy.java  |  47 ++
 .../amrmproxy/BroadcastAMRMProxyPolicy.java |  85 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  25 +-
 .../LocalityMulticastAMRMProxyPolicy.java   | 583 +++
 .../policies/amrmproxy/package-info.java|   1 -
 .../policies/dao/WeightedPolicyInfo.java| 180 +++---
 .../federation/policies/dao/package-info.java   |   1 -
 .../policies/exceptions/package-info.java   |   1 -
 .../federation/policies/package-info.java   |   1 -
 .../policies/router/AbstractRouterPolicy.java   |  47 ++
 .../router/BaseWeightedRouterPolicy.java| 150 -
 .../policies/router/FederationRouterPolicy.java |   5 +-
 .../policies/router/LoadBasedRouterPolicy.java  |  36 +-
 .../policies/router/PriorityRouterPolicy.java   |  19 +-
 .../router/UniformRandomRouterPolicy.java   |  28 +-
 .../router/WeightedRandomRouterPolicy.java  |  32 +-
 .../policies/router/package-info.java   |   1 -
 .../resolver/AbstractSubClusterResolver.java|   4 +-
 .../policies/BaseFederationPoliciesTest.java|  28 +-
 ...ionPolicyInitializationContextValidator.java |  25 +-
 .../TestBroadcastAMRMProxyFederationPolicy.java | 112 
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 566 ++
 .../router/TestLoadBasedRouterPolicy.java   |  18 +-
 .../router/TestPriorityRouterPolicy.java|  15 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  35 +-
 .../utils/FederationPoliciesTestUtil.java   |  64 ++
 .../src/test/resources/nodes|   6 +-
 32 files changed, 1950 insertions(+), 453 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5ab53f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
new file mode 100644
index 000..4cb9bbe
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractConfigurableFederationPolicy.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import java.util.Map;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 

[34/50] [abbrv] hadoop git commit: YARN-6511. Federation: transparently spanning application across multiple sub-clusters. (Botong Huang via Subru).

2017-09-21 Thread curino
YARN-6511. Federation: transparently spanning application across multiple 
sub-clusters. (Botong Huang via Subru).

(cherry picked from commit 8c988d235eaf0972783985b1ab24680d029aea79)
(cherry picked from commit 70b1a757f13b01a9192ea5fb0820ba7babfd974e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35a38330
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35a38330
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35a38330

Branch: refs/heads/branch-2
Commit: 35a38330ef0ff598f82ee0621a8b2aecc5c1136d
Parents: 7ede8c1
Author: Subru Krishnan 
Authored: Wed Jun 7 14:45:51 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:49:55 2017 -0700

--
 .../policies/FederationPolicyUtils.java | 168 +
 .../federation/policies/RouterPolicyFacade.java |  21 +-
 .../amrmproxy/FederationInterceptor.java| 685 ++-
 .../amrmproxy/TestFederationInterceptor.java| 251 +++
 .../TestableFederationInterceptor.java  |   6 +
 5 files changed, 1095 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35a38330/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
new file mode 100644
index 000..37ce942
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/FederationPolicyUtils.java
@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.FederationAMRMProxyPolicy;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.FederationPolicyManager;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterPolicyConfiguration;
+import 
org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class for Federation policy.
+ */
+@Private
+public final class FederationPolicyUtils {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(FederationPolicyUtils.class);
+
+  /** Disable constructor. */
+  private FederationPolicyUtils() {
+  }
+
+  /**
+   * A utility method to instantiate a policy manager class given the type
+   * (class name) from {@link SubClusterPolicyConfiguration}.
+   *
+   * @param newType class name of the policy manager to create
+   * @return Policy manager
+   * @throws FederationPolicyInitializationException if fails
+   */
+  public static FederationPolicyManager instantiatePolicyManager(String 
newType)
+  throws FederationPolicyInitializationException {
+FederationPolicyManager federationPolicyManager = null;
+try {
+  // create policy instance and set queue
+  Class c = Class.forName(newType);
+  federationPolicyManager = (FederationPolicyManager) c.newInstance();
+} catch 

[42/50] [abbrv] hadoop git commit: Bumping up hadoop-yarn-server-router module to 3.0.0-beta1 post rebase.

2017-09-21 Thread curino
Bumping up hadoop-yarn-server-router module to 3.0.0-beta1 post rebase.

(cherry picked from commit 69e3ed26809e12dd62cb66d258ef51c66db3be0a)
(cherry picked from commit 8737c2a8e96799cf3fda730d25a01f6ea2f12e9b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66980c00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66980c00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66980c00

Branch: refs/heads/branch-2
Commit: 66980c00f6ec94c04a08c5326ca7a2ad3ba678bb
Parents: 58e2458
Author: Subru Krishnan 
Authored: Tue Jul 25 16:58:43 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:55:42 2017 -0700

--
 .../hadoop-yarn-server/hadoop-yarn-server-router/pom.xml   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66980c00/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
index 78e5e59..4eea9a6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
@@ -19,7 +19,7 @@
   
 hadoop-yarn-server
 org.apache.hadoop
-3.0.0-alpha4-SNAPSHOT
+3.0.0-beta1-SNAPSHOT
   
   4.0.0
   org.apache.hadoop


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[36/50] [abbrv] hadoop git commit: YARN-3659. Federation: routing client invocations transparently to multiple RMs. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
http://git-wip-us.apache.org/repos/asf/hadoop/blob/169037cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java
index 7e15084..7fc4719 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/BaseRouterClientRMTest.java
@@ -119,29 +119,41 @@ public abstract class BaseRouterClientRMTest {
 return this.clientrmService;
   }
 
-  @Before
-  public void setUp() {
-this.conf = new YarnConfiguration();
+  protected YarnConfiguration createConfiguration() {
+YarnConfiguration config = new YarnConfiguration();
 String mockPassThroughInterceptorClass =
 PassThroughClientRequestInterceptor.class.getName();
 
 // Create a request intercepter pipeline for testing. The last one in the
 // chain will call the mock resource manager. The others in the chain will
 // simply forward it to the next one in the chain
-this.conf.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE,
+config.set(YarnConfiguration.ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE,
 mockPassThroughInterceptorClass + "," + mockPassThroughInterceptorClass
 + "," + mockPassThroughInterceptorClass + ","
 + MockClientRequestInterceptor.class.getName());
 
-this.conf.setInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE,
+config.setInt(YarnConfiguration.ROUTER_PIPELINE_CACHE_MAX_SIZE,
 TEST_MAX_CACHE_SIZE);
+return config;
+  }
 
+  @Before
+  public void setUp() {
+this.conf = createConfiguration();
 this.dispatcher = new AsyncDispatcher();
 this.dispatcher.init(conf);
 this.dispatcher.start();
 this.clientrmService = createAndStartRouterClientRMService();
   }
 
+  public void setUpConfig() {
+this.conf = createConfiguration();
+  }
+
+  protected Configuration getConf() {
+return this.conf;
+  }
+
   @After
   public void tearDown() {
 if (clientrmService != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/169037cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java
new file mode 100644
index 000..87dfc95
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestFederationClientInterceptor.java
@@ -0,0 +1,403 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.router.clientrm;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import 

[38/50] [abbrv] hadoop git commit: YARN-6807. Adding required missing configs to Federation configuration guide based on e2e testing. (Tanuj Nayak via Subru).

2017-09-21 Thread curino
YARN-6807. Adding required missing configs to Federation configuration guide 
based on e2e testing. (Tanuj Nayak via Subru).

(cherry picked from commit b4ac9d1b63dd4031eee8b17d2462087721050b9a)
(cherry picked from commit f427e4201f969dd4d1e086c24a48a247d10cdc2f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3fb1711
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3fb1711
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3fb1711

Branch: refs/heads/branch-2
Commit: d3fb1711a44c63966d4b8ec99511c1b2b1521217
Parents: 169037c
Author: Subru Krishnan 
Authored: Thu Jul 13 18:44:32 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:55:39 2017 -0700

--
 .../src/site/markdown/Federation.md | 53 ++--
 1 file changed, 49 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3fb1711/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
index c50ba76..79225b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/Federation.md
@@ -86,6 +86,8 @@ of the desirable properties of balance, optimal cluster 
utilization and global i
 
 *NOTE*: In the current implementation the GPG is a manual tuning process, 
simply exposed via a CLI (YARN-3657).
 
+This part of the federation system is part of future work in 
[YARN-5597](https://issues.apache.org/jira/browse/YARN-5597).
+
 
 ###Federation State-Store
 The Federation State defines the additional state that needs to be maintained 
to loosely couple multiple individual sub-clusters into a single large 
federated cluster. This includes the following information:
@@ -159,7 +161,7 @@ These are common configurations that should appear in the 
**conf/yarn-site.xml**
 |: |: |
 |`yarn.federation.enabled` | `true` | Whether federation is enabled or not |
 |`yarn.federation.state-store.class` | 
`org.apache.hadoop.yarn.server.federation.store.impl.SQLFederationStateStore` | 
The type of state-store to use. |
-|`yarn.federation.state-store.sql.url` | 
`jdbc:sqlserver://:;database` | For SQLFederationStateStore the 
name of the DB where the state is stored. |
+|`yarn.federation.state-store.sql.url` | 
`jdbc:sqlserver://:;databaseName=FederationStateStore` | For 
SQLFederationStateStore the name of the DB where the state is stored. |
 |`yarn.federation.state-store.sql.jdbc-class` | 
`com.microsoft.sqlserver.jdbc.SQLServerDataSource` | For 
SQLFederationStateStore the jdbc class to use. |
 |`yarn.federation.state-store.sql.username` | `` | For 
SQLFederationStateStore the username for the DB connection. |
 |`yarn.federation.state-store.sql.password` | `` | For 
SQLFederationStateStore the password for the DB connection. |
@@ -175,7 +177,7 @@ Optional:
 |`yarn.federation.policy-manager` | 
`org.apache.hadoop.yarn.server.federation.policies.manager.WeightedLocalityPolicyManager`
 | The choice of policy manager determines how Applications and 
ResourceRequests are routed through the system. |
 |`yarn.federation.policy-manager-params` | `` | The payload that 
configures the policy. In our example a set of weights for router and amrmproxy 
policies. This is typically generated by serializing a policymanager that has 
been configured programmatically, or by populating the state-store with the 
.json serialized form of it. |
 |`yarn.federation.subcluster-resolver.class` | 
`org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl`
 | The class used to resolve which subcluster a node belongs to, and which 
subcluster(s) a rack belongs to. |
-| `yarn.federation.machine-list` | `node1,subcluster1,rack1\n node2 , 
subcluster2, RACK1\n noDE3,subcluster3, rack2\n node4, subcluster3, rack2\n` | 
a list of Nodes, Sub-clusters, Rack, used by the 
`DefaultSubClusterResolverImpl` |
+| `yarn.federation.machine-list` | `node1,subcluster1,rack1\n node2 , 
subcluster2, RACK1\n node3,subcluster3, rack2\n node4, subcluster3, rack2\n` | 
a list of Nodes, Sub-clusters, Rack, used by the 
`DefaultSubClusterResolverImpl` |
 
 ###ON RMs:
 
@@ -200,6 +202,7 @@ These are extra configurations that should appear in the 
**conf/yarn-site.xml**
 | Property | Example | Description |
 |: |: |
 |`yarn.router.bind-host` | `0.0.0.0` | Host IP to bind the router to.  The 
actual address the server will bind to. If this optional address is set, the 
RPC and webapp servers 

[35/50] [abbrv] hadoop git commit: YARN-6724. Add ability to blacklist sub-clusters when invoking Routing policies. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
YARN-6724. Add ability to blacklist sub-clusters when invoking Routing 
policies. (Giovanni Matteo Fumarola via Subru).

(cherry picked from commit f8e5de59697cb78686f0e605dc7e93628b5f3297)
(cherry picked from commit 4cfec943b177e2123a935e70d39776521883c2bc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ed4dadd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ed4dadd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ed4dadd

Branch: refs/heads/branch-2
Commit: 8ed4daddc4f264876591722140fa7eb7ab8d482b
Parents: 35a3833
Author: Subru Krishnan 
Authored: Wed Jun 21 19:08:47 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:55:01 2017 -0700

--
 .../federation/policies/RouterPolicyFacade.java | 15 +--
 .../policies/router/FederationRouterPolicy.java | 18 +---
 .../policies/router/HashBasedRouterPolicy.java  | 22 --
 .../policies/router/LoadBasedRouterPolicy.java  |  7 +++-
 .../policies/router/PriorityRouterPolicy.java   |  7 +++-
 .../policies/router/RejectRouterPolicy.java | 26 
 .../router/UniformRandomRouterPolicy.java   | 23 +--
 .../router/WeightedRandomRouterPolicy.java  | 11 -
 .../policies/BaseFederationPoliciesTest.java|  2 +-
 .../policies/TestRouterPolicyFacade.java| 12 +++---
 .../policies/router/BaseRouterPoliciesTest.java | 43 +++-
 .../router/TestHashBasedRouterPolicy.java   |  2 +-
 .../router/TestLoadBasedRouterPolicy.java   |  2 +-
 .../router/TestPriorityRouterPolicy.java|  2 +-
 .../policies/router/TestRejectRouterPolicy.java |  4 +-
 .../router/TestUniformRandomRouterPolicy.java   |  2 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  2 +-
 17 files changed, 157 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ed4dadd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
index 5e31a08..44c1b10 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/RouterPolicyFacade.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.federation.policies;
 
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
@@ -110,16 +111,22 @@ public class RouterPolicyFacade {
* This method provides a wrapper of all policy functionalities for routing.
* Internally it manages configuration changes, and policy init/reinit.
*
-   * @param appSubmissionContext the application to route.
+   * @param appSubmissionContext the {@link ApplicationSubmissionContext} that
+   *  has to be routed to an appropriate subCluster for execution.
*
-   * @return the id of the subcluster that will be the "home" for this
+   * @param blackListSubClusters the list of subClusters as identified by
+   *  {@link SubClusterId} to blackList from the selection of the home
+   *  subCluster.
+   *
+   * @return the {@link SubClusterId} that will be the "home" for this
* application.
*
* @throws YarnException if there are issues initializing policies, or no
*   valid sub-cluster id could be found for this app.
*/
   public SubClusterId getHomeSubcluster(
-  ApplicationSubmissionContext appSubmissionContext) throws YarnException {
+  ApplicationSubmissionContext appSubmissionContext,
+  List blackListSubClusters) throws YarnException {
 
 // the maps are concurrent, but we need to protect from reset()
 // reinitialization mid-execution by creating a new reference local to this
@@ -186,7 +193,7 @@ public class RouterPolicyFacade {
   + "and no default specified.");
 }
 
-return policy.getHomeSubcluster(appSubmissionContext);
+return policy.getHomeSubcluster(appSubmissionContext, 
blackListSubClusters);
   }
 
   /**


[14/50] [abbrv] hadoop git commit: YARN-5872. Add AlwaysReject policies for router and amrmproxy. (Carlo Curino via Subru).

2017-09-21 Thread curino
YARN-5872. Add AlwaysReject policies for router and amrmproxy. (Carlo Curino via 
Subru).

(cherry picked from commit db26bade059ff9b38da0aa160f56653769bd0143)
(cherry picked from commit 91803305e526a4c05e95834b8a104c9b335cecb6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/002a77dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/002a77dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/002a77dc

Branch: refs/heads/branch-2
Commit: 002a77dcce0d6c3ba4866aba4ca2b0f14e45fdb5
Parents: e933a17
Author: Subru Krishnan 
Authored: Tue Nov 22 18:37:30 2016 -0800
Committer: Carlo Curino 
Committed: Thu Sep 21 16:23:46 2017 -0700

--
 .../amrmproxy/RejectAMRMProxyPolicy.java| 67 +
 .../manager/RejectAllPolicyManager.java | 40 ++
 .../policies/router/RejectRouterPolicy.java | 66 +
 .../amrmproxy/TestRejectAMRMProxyPolicy.java| 78 
 .../manager/TestRejectAllPolicyManager.java | 40 ++
 .../policies/router/TestRejectRouterPolicy.java | 63 
 6 files changed, 354 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/002a77dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
new file mode 100644
index 000..3783df6
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/RejectAMRMProxyPolicy.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies.amrmproxy;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
+import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContextValidator;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyException;
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+
+/**
+ * An implementation of the {@link FederationAMRMProxyPolicy} that simply
+ * rejects all requests. Useful to prevent apps from accessing any sub-cluster.
+ */
+public class RejectAMRMProxyPolicy extends AbstractAMRMProxyPolicy {
+
+  private Set knownClusterIds = new HashSet<>();
+
+  @Override
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
+  throws FederationPolicyInitializationException {
+// overrides initialize to avoid weight checks that do no apply for
+// this policy.
+FederationPolicyInitializationContextValidator.validate(policyContext,
+this.getClass().getCanonicalName());
+setPolicyContext(policyContext);
+  }
+
+  @Override
+  public Map splitResourceRequests(
+  List resourceRequests) throws YarnException {
+throw new FederationPolicyException("The policy configured for this queue "
++ "rejects all routing requests by construction.");
+  }
+
+  @Override
+  public void 

[01/50] [abbrv] hadoop git commit: YARN-3671. Integrate Federation services with ResourceManager. Contributed by Subru Krishnan

2017-09-21 Thread curino
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fc6d9612c -> d11be2dca


YARN-3671. Integrate Federation services with ResourceManager. Contributed by 
Subru Krishnan

(cherry picked from commit 8573c286e27623155c715a632d56a68f23523c72)
(cherry picked from commit 94a24567d65469091c8b521987dc2003d0327159)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea2ecdb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea2ecdb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea2ecdb7

Branch: refs/heads/branch-2
Commit: ea2ecdb796297573ee66727be15e41c0350408fa
Parents: 615c912
Author: Jian He 
Authored: Tue Aug 30 12:20:52 2016 +0800
Committer: Carlo Curino 
Committed: Thu Sep 21 16:23:33 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  11 +-
 .../yarn/conf/TestYarnConfigurationFields.java  |   4 +-
 .../failover/FederationProxyProviderUtil.java   |   2 +-
 .../FederationRMFailoverProxyProvider.java  |   4 +-
 ...ationMembershipStateStoreInputValidator.java |   7 +-
 .../TestFederationStateStoreInputValidator.java |  10 +-
 .../server/resourcemanager/ResourceManager.java |  27 ++
 .../FederationStateStoreHeartbeat.java  | 108 +++
 .../federation/FederationStateStoreService.java | 304 +++
 .../federation/package-info.java|  17 ++
 .../webapp/dao/ClusterMetricsInfo.java  |   5 +-
 .../TestFederationRMStateStoreService.java  | 170 +++
 12 files changed, 649 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea2ecdb7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index eec0cd0..c9db167 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2515,9 +2515,6 @@ public class YarnConfiguration extends Configuration {
   FEDERATION_PREFIX + "failover.enabled";
   public static final boolean DEFAULT_FEDERATION_FAILOVER_ENABLED = true;
 
-  public static final String FEDERATION_SUBCLUSTER_ID =
-  FEDERATION_PREFIX + "sub-cluster.id";
-
   public static final String FEDERATION_STATESTORE_CLIENT_CLASS =
   FEDERATION_PREFIX + "state-store.class";
 
@@ -2530,6 +2527,14 @@ public class YarnConfiguration extends Configuration {
   // 5 minutes
   public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60;
 
+  public static final String FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
+  FEDERATION_PREFIX + "state-store.heartbeat-interval-secs";
+
+  // 5 minutes
+  public static final int
+  DEFAULT_FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS =
+  5 * 60;
+
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea2ecdb7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 2d02c30..38e2668 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -74,9 +74,9 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 configurationPropsToSkipCompare
 .add(YarnConfiguration.DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS);
 configurationPropsToSkipCompare
-.add(YarnConfiguration.FEDERATION_SUBCLUSTER_ID);
-configurationPropsToSkipCompare
 .add(YarnConfiguration.FEDERATION_FAILOVER_ENABLED);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS);
 
 // Ignore blacklisting nodes for AM failures feature since it is still a
 // "work in progress"


[18/50] [abbrv] hadoop git commit: YARN-6247. Share a single instance of SubClusterResolver instead of instantiating one per AM. (Botong Huang via Subru)

2017-09-21 Thread curino
YARN-6247. Share a single instance of SubClusterResolver instead of 
instantiating one per AM. (Botong Huang via Subru)

(cherry picked from commit 51aeb2ce0c599176aca9466a939c3ad55df30036)
(cherry picked from commit 86b2bec56e28a2d1ece53ab5a452860fd0444268)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61c07e4f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61c07e4f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61c07e4f

Branch: refs/heads/branch-2
Commit: 61c07e4f3ff23d130eae487ea7d058746bb1166a
Parents: 6191fac
Author: Subru Krishnan 
Authored: Thu Mar 2 18:54:53 2017 -0800
Committer: Carlo Curino 
Committed: Thu Sep 21 16:25:09 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  6 +++
 .../src/main/resources/yarn-default.xml |  7 +++
 .../resolver/AbstractSubClusterResolver.java|  6 +--
 .../federation/resolver/SubClusterResolver.java |  4 +-
 .../utils/FederationStateStoreFacade.java   | 48 +---
 5 files changed, 59 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61c07e4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 906d632..fffef1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2541,6 +2541,12 @@ public class YarnConfiguration extends Configuration {
   public static final String FEDERATION_MACHINE_LIST =
   FEDERATION_PREFIX + "machine-list";
 
+  public static final String FEDERATION_CLUSTER_RESOLVER_CLASS =
+  FEDERATION_PREFIX + "subcluster-resolver.class";
+  public static final String DEFAULT_FEDERATION_CLUSTER_RESOLVER_CLASS =
+  "org.apache.hadoop.yarn.server.federation.resolver."
+  + "DefaultSubClusterResolverImpl";
+
   public static final String DEFAULT_FEDERATION_POLICY_KEY = "*";
 
   public static final String FEDERATION_POLICY_MANAGER = FEDERATION_PREFIX

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61c07e4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index bc9e853..074a95d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2632,6 +2632,13 @@
 
 yarn.federation.machine-list
   
+  
+
+  Class name for SubClusterResolver
+
+yarn.federation.subcluster-resolver.class
+
org.apache.hadoop.yarn.server.federation.resolver.DefaultSubClusterResolverImpl
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61c07e4f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
index 6b4f60c..bccff2d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/resolver/AbstractSubClusterResolver.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.federation.resolver;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
 
-import java.util.HashMap;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.Map;
 
 /**
@@ -31,9 +31,9 @@ 

[19/50] [abbrv] hadoop git commit: YARN-6370. Properly handle rack requests for non-active subclusters in LocalityMulticastAMRMProxyPolicy. (Contributed by Botong Huang via curino).

2017-09-21 Thread curino
YARN-6370. Properly handle rack requests for non-active subclusters in 
LocalityMulticastAMRMProxyPolicy. (Contributed by Botong Huang via curino).

(cherry picked from commit ce419881c32b178c48c3a01b5a15e4e3a3e750f5)
(cherry picked from commit 1c64e1709b627846f29dc2fe6d637f074de8b403)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b40bdafe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b40bdafe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b40bdafe

Branch: refs/heads/branch-2
Commit: b40bdafe31168fc60f4a6d3a060e7d16097b14ff
Parents: 61c07e4
Author: Carlo Curino 
Authored: Wed Mar 22 13:53:47 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:25:10 2017 -0700

--
 .../LocalityMulticastAMRMProxyPolicy.java   |  6 ++-
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 53 +---
 2 files changed, 41 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b40bdafe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 6f97a51..454962f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -261,7 +261,11 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 
   // If home-subcluster is not active, ignore node/rack request
   if (bookkeeper.isActiveAndEnabled(homeSubcluster)) {
-bookkeeper.addLocalizedNodeRR(homeSubcluster, rr);
+if (targetIds != null && targetIds.size() > 0) {
+  bookkeeper.addRackRR(homeSubcluster, rr);
+} else {
+  bookkeeper.addLocalizedNodeRR(homeSubcluster, rr);
+}
   } else {
 if (LOG.isDebugEnabled()) {
   LOG.debug("The homeSubCluster (" + homeSubcluster + ") we are "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b40bdafe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
index 5b3cf74..6e3a2f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/TestLocalityMulticastAMRMProxyPolicy.java
@@ -339,19 +339,20 @@ public class TestLocalityMulticastAMRMProxyPolicy
 validateSplit(response, resourceRequests);
 prettyPrintRequests(response);
 
-// we expect 4 entry for home subcluster (3 for request-id 4, and a part
-// of the broadcast of request-id 2
-checkExpectedAllocation(response, getHomeSubCluster().getId(), 4, 23);
+// we expect 7 entries for home subcluster (2 for request-id 4, 3 for
+// request-id 5, and a part of the broadcast of request-id 2
+checkExpectedAllocation(response, getHomeSubCluster().getId(), 7, 29);
 
-// for subcluster0 we expect 3 entry from request-id 0, and 3 from
-// request-id 3, as well as part of the request-id 2 broadast
-checkExpectedAllocation(response, "subcluster0", 7, 26);
+// for subcluster0 we expect 10 entries, 3 from request-id 0, and 3 from
+// request-id 3, 3 entries from request-id 5, as well as part of the
+// 

[24/50] [abbrv] hadoop git commit: YARN-5411. Create a proxy chain for ApplicationClientProtocol in the Router. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc0a2e6c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
new file mode 100644
index 000..a9c3729
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
@@ -0,0 +1,210 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.router.clientrm;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.router.clientrm.RouterClientRMService.RequestInterceptorChainWrapper;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test class to validate the ClientRM Service inside the Router.
+ */
+public class TestRouterClientRMService extends BaseRouterClientRMTest {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestRouterClientRMService.class);
+
+  /**
+   * Tests if the pipeline is created properly.
+   */
+  @Test
+  public void testRequestInterceptorChainCreation() throws Exception {
+ClientRequestInterceptor root =
+super.getRouterClientRMService().createRequestInterceptorChain();
+int index = 0;
+while (root != null) {
+  // The current pipeline is:
+  // PassThroughClientRequestInterceptor - index = 0
+  // PassThroughClientRequestInterceptor - index = 1
+  // PassThroughClientRequestInterceptor - index = 2
+  // MockClientRequestInterceptor - index = 3
+  switch (index) {
+  case 0: // Fall to the next case
+  case 1: // Fall to the next case
+  case 2:
+// If index is equal to 0,1 or 2 we fall in this check
+
Assert.assertEquals(PassThroughClientRequestInterceptor.class.getName(),
+root.getClass().getName());
+break;
+  case 3:
+Assert.assertEquals(MockClientRequestInterceptor.class.getName(),
+root.getClass().getName());
+break;
+  default:
+Assert.fail();
+  }
+  root = root.getNextInterceptor();
+  index++;
+}
+Assert.assertEquals("The number of interceptors in chain does not match", 
4,
+index);
+  }
+
+  /**
+   * Test if the RouterClientRM forwards all the requests to the MockRM and get
+   * back the responses.
+   */
+  @Test
+  public void testRouterClientRMServiceE2E() throws Exception {
+
+String user = "test1";
+
+LOG.info("testRouterClientRMServiceE2E - Get New Application");
+
+GetNewApplicationResponse responseGetNewApp = 

[49/50] [abbrv] hadoop git commit: YARN-2280. Resource manager web service fields are not accessible (Krisztian Horvath via aw)

2017-09-21 Thread curino
YARN-2280. Resource manager web service fields are not accessible (Krisztian 
Horvath via aw)

(cherry picked from commit a5cf985bf501fd032124d121dcae80538db9e380)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/baa2bec8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/baa2bec8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/baa2bec8

Branch: refs/heads/branch-2
Commit: baa2bec84399cf4c8dbd8306eeb8f3e0ea71e45a
Parents: d3afff7
Author: Allen Wittenauer 
Authored: Tue Mar 10 16:32:20 2015 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 17:52:25 2017 -0700

--
 .../hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java  | 3 +++
 .../yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java | 3 +++
 2 files changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/baa2bec8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
index 7be9a6f..7dacd10 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodesInfo.java
@@ -36,4 +36,7 @@ public class NodesInfo {
 node.add(nodeinfo);
   }
 
+  public ArrayList getNodes() {
+return node;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/baa2bec8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
index 34078f1..22018d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerTypeInfo.java
@@ -34,4 +34,7 @@ public class SchedulerTypeInfo {
 this.schedulerInfo = scheduler;
   }
 
+  public SchedulerInfo getSchedulerInfo() {
+return schedulerInfo;
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: YARN-5602. Utils for Federation State and Policy Store. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
YARN-5602. Utils for Federation State and Policy Store. (Giovanni Matteo 
Fumarola via Subru).

(cherry picked from commit 326a2e6bde1cf266ecc7d3b513cdaac6abcebbe4)
(cherry picked from commit e1da8f0667589dd660e6fcd776cc87f1b8ef6db9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9dcf928
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9dcf928
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9dcf928

Branch: refs/heads/branch-2
Commit: b9dcf9283e29281e55a65b59bd74c21dc6806296
Parents: b40bdaf
Author: Subru Krishnan 
Authored: Wed Apr 5 15:02:00 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:25:10 2017 -0700

--
 .../FederationStateStoreErrorCode.java  | 105 +
 .../FederationStateStoreException.java  |  45 ++
 ...derationStateStoreInvalidInputException.java |  48 ++
 .../FederationStateStoreRetriableException.java |  44 ++
 .../store/exception/package-info.java   |  17 ++
 .../store/impl/MemoryFederationStateStore.java  |  56 +--
 .../store/records/SubClusterInfo.java   |  62 
 .../records/impl/pb/SubClusterInfoPBImpl.java   |  16 --
 ...cationHomeSubClusterStoreInputValidator.java |   1 +
 ...ationMembershipStateStoreInputValidator.java |   1 +
 .../FederationPolicyStoreInputValidator.java|   1 +
 ...derationStateStoreInvalidInputException.java |  48 --
 .../store/utils/FederationStateStoreUtils.java  | 155 +++
 .../utils/FederationStateStoreFacade.java   |  23 ++-
 .../impl/FederationStateStoreBaseTest.java  |  91 ++-
 .../impl/TestMemoryFederationStateStore.java|   4 +-
 .../TestFederationStateStoreInputValidator.java |   1 +
 .../TestFederationStateStoreFacadeRetry.java| 125 +++
 18 files changed, 730 insertions(+), 113 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9dcf928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreErrorCode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreErrorCode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreErrorCode.java
new file mode 100644
index 000..88e2d3a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/exception/FederationStateStoreErrorCode.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.store.exception;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * 
+ * Logical error codes from FederationStateStore.
+ * 
+ */
+@Public
+@Unstable
+public enum FederationStateStoreErrorCode {
+
+  MEMBERSHIP_INSERT_FAIL(1101, "Fail to insert a tuple into Membership 
table."),
+
+  MEMBERSHIP_DELETE_FAIL(1102, "Fail to delete a tuple from Membership 
table."),
+
+  MEMBERSHIP_SINGLE_SELECT_FAIL(1103,
+  "Fail to select a tuple from Membership table."),
+
+  MEMBERSHIP_MULTIPLE_SELECT_FAIL(1104,
+  "Fail to select multiple tuples from Membership table."),
+
+  MEMBERSHIP_UPDATE_DEREGISTER_FAIL(1105,
+  "Fail to update/deregister a tuple in Membership table."),
+
+  MEMBERSHIP_UPDATE_HEARTBEAT_FAIL(1106,
+  "Fail to update/heartbeat a tuple in Membership table."),
+
+  APPLICATIONS_INSERT_FAIL(1201,
+  "Fail to insert a tuple into ApplicationsHomeSubCluster table."),
+
+  APPLICATIONS_DELETE_FAIL(1202,
+  "Fail to delete a tuple from 

[05/50] [abbrv] hadoop git commit: YARN-5325. Stateless ARMRMProxy policies implementation. (Carlo Curino via Subru).

2017-09-21 Thread curino
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5ab53f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
index e57709f..5de749f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/LoadBasedRouterPolicy.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.yarn.server.federation.policies.router;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import java.util.Map;
+
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
@@ -30,34 +30,27 @@ import 
org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 
-import java.util.Map;
-
 /**
  * This implements a simple load-balancing policy. The policy "weights" are
  * binary 0/1 values that enable/disable each sub-cluster, and the policy peaks
  * the sub-cluster with the least load to forward this application.
  */
-public class LoadBasedRouterPolicy
-extends BaseWeightedRouterPolicy {
-
-  private static final Log LOG =
-  LogFactory.getLog(LoadBasedRouterPolicy.class);
+public class LoadBasedRouterPolicy extends AbstractRouterPolicy {
 
   @Override
-  public void reinitialize(FederationPolicyInitializationContext
-  federationPolicyContext)
+  public void reinitialize(FederationPolicyInitializationContext policyContext)
   throws FederationPolicyInitializationException {
 
 // remember old policyInfo
 WeightedPolicyInfo tempPolicy = getPolicyInfo();
 
-//attempt new initialization
-super.reinitialize(federationPolicyContext);
+// attempt new initialization
+super.reinitialize(policyContext);
 
-//check extra constraints
+// check extra constraints
 for (Float weight : getPolicyInfo().getRouterPolicyWeights().values()) {
   if (weight != 0 && weight != 1) {
-//reset to old policyInfo if check fails
+// reset to old policyInfo if check fails
 setPolicyInfo(tempPolicy);
 throw new FederationPolicyInitializationException(
 this.getClass().getCanonicalName()
@@ -69,18 +62,16 @@ public class LoadBasedRouterPolicy
 
   @Override
   public SubClusterId getHomeSubcluster(
-  ApplicationSubmissionContext appSubmissionContext)
-  throws YarnException {
+  ApplicationSubmissionContext appSubmissionContext) throws YarnException {
 
 Map activeSubclusters =
 getActiveSubclusters();
 
-Map weights = getPolicyInfo()
-.getRouterPolicyWeights();
+Map weights =
+getPolicyInfo().getRouterPolicyWeights();
 SubClusterIdInfo chosen = null;
 long currBestMem = -1;
-for (Map.Entry entry :
-activeSubclusters
+for (Map.Entry entry : activeSubclusters
 .entrySet()) {
   SubClusterIdInfo id = new SubClusterIdInfo(entry.getKey());
   if (weights.containsKey(id) && weights.get(id) > 0) {
@@ -95,8 +86,7 @@ public class LoadBasedRouterPolicy
 return chosen.toId();
   }
 
-  private long getAvailableMemory(SubClusterInfo value)
-  throws YarnException {
+  private long getAvailableMemory(SubClusterInfo value) throws YarnException {
 try {
   long mem = -1;
   JSONObject obj = new JSONObject(value.getCapability());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c5ab53f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/router/PriorityRouterPolicy.java
 

[12/50] [abbrv] hadoop git commit: YARN-5676. Add a HashBasedRouterPolicy, and small policies and test refactoring. (Carlo Curino via Subru).

2017-09-21 Thread curino
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d32ffa9e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
index 4975a9f..5fa02d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestRouterPolicyFacade.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.PriorityBroadcastPolicyManager;
+import 
org.apache.hadoop.yarn.server.federation.policies.manager.UniformBroadcastPolicyManager;
 import 
org.apache.hadoop.yarn.server.federation.policies.router.PriorityRouterPolicy;
 import 
org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy;
 import org.apache.hadoop.yarn.server.federation.resolver.SubClusterResolver;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d32ffa9e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
deleted file mode 100644
index 542a5ae..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestUniformBroadcastPolicyManager.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.federation.policies;
-
-import 
org.apache.hadoop.yarn.server.federation.policies.amrmproxy.BroadcastAMRMProxyPolicy;
-import 
org.apache.hadoop.yarn.server.federation.policies.router.UniformRandomRouterPolicy;
-import org.junit.Before;
-
-/**
- * Simple test of {@link UniformBroadcastPolicyManager}.
- */
-public class TestUniformBroadcastPolicyManager extends BasePolicyManagerTest {
-
-  @Before
-  public void setup() {
-//config policy
-wfp = new UniformBroadcastPolicyManager();
-wfp.setQueue("queue1");
-
-//set expected params that the base test class will use for tests
-expectedPolicyManager = UniformBroadcastPolicyManager.class;
-expectedAMRMProxyPolicy = BroadcastAMRMProxyPolicy.class;
-expectedRouterPolicy = UniformRandomRouterPolicy.class;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d32ffa9e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestWeightedLocalityPolicyManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/TestWeightedLocalityPolicyManager.java
 

[23/50] [abbrv] hadoop git commit: YARN-3663. Federation State and Policy Store (DBMS implementation). (Giovanni Matteo Fumarola via curino).

2017-09-21 Thread curino
YARN-3663. Federation State and Policy Store (DBMS implementation). (Giovanni 
Matteo Fumarola via curino).

(cherry picked from commit be99c1fe2eb150fabd69902118d65941f82971f6)
(cherry picked from commit 5272af8c7eab76d779c621eb0208bf29ffa25613)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c5dfa0f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c5dfa0f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c5dfa0f

Branch: refs/heads/branch-2
Commit: 3c5dfa0fe551f22e4550b61d3bf689857c9e418d
Parents: c38ac05
Author: Carlo Curino 
Authored: Tue Apr 25 15:14:02 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:33:31 2017 -0700

--
 LICENSE.txt |   2 +
 hadoop-project/pom.xml  |  26 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |  23 +
 .../yarn/conf/TestYarnConfigurationFields.java  |  14 +
 .../hadoop-yarn-server-common/pom.xml   |  20 +
 .../FederationStateStoreErrorCode.java  | 105 ---
 .../FederationStateStoreException.java  |  17 +-
 .../store/impl/MemoryFederationStateStore.java  |  81 +-
 .../store/impl/SQLFederationStateStore.java | 937 +++
 .../store/records/SubClusterState.java  |  21 +
 ...cationHomeSubClusterStoreInputValidator.java |  12 +-
 ...ationMembershipStateStoreInputValidator.java |  14 +-
 .../FederationPolicyStoreInputValidator.java|   6 +-
 .../store/utils/FederationStateStoreUtils.java  | 109 ++-
 .../impl/FederationStateStoreBaseTest.java  |  74 +-
 .../store/impl/HSQLDBFederationStateStore.java  | 252 +
 .../impl/TestMemoryFederationStateStore.java|   3 +-
 .../store/impl/TestSQLFederationStateStore.java |  49 +
 .../TestFederationStateStoreInputValidator.java | 146 +--
 .../TestFederationStateStoreFacadeRetry.java|   7 +-
 .../FederationStateStoreStoreProcs.sql  | 511 ++
 .../SQLServer/FederationStateStoreTables.sql| 122 +++
 22 files changed, 2242 insertions(+), 309 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c5dfa0f/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index 65b9d42..b25ec5a 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -582,6 +582,7 @@ For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
+Microsoft SQLServer - JDBC version 6.1.0.jre7
 

 
 Copyright jQuery Foundation and other contributors, https://jquery.org/
@@ -659,6 +660,7 @@ The binary distribution of this product bundles these 
dependencies under the
 following license:
 HSQLDB Database 2.3.4
 

+(HSQL License)
 "COPYRIGHTS AND LICENSES (based on BSD License)
 
 For work developed by the HSQL Development Group:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c5dfa0f/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index eb8fde9..152dcc7 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -88,6 +88,11 @@
 
 2.0.0-M15
 
+1.0.0
+3.0.3
+2.4.11
+6.1.0.jre7
+
 
 1.7
 
@@ -1073,7 +1078,26 @@
 jsonassert
 1.3.0
 
-
+
+  javax.cache
+  cache-api
+  ${jcache.version}
+
+
+  org.ehcache
+  ehcache
+  ${ehcache.version}
+
+
+  com.zaxxer
+  HikariCP-java7
+  ${hikari.version}
+
+
+  com.microsoft.sqlserver
+  mssql-jdbc
+  ${mssql.version}
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c5dfa0f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index fffef1c..6fbf1a6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2561,6 

[46/50] [abbrv] hadoop git commit: YARN-5412. Create a proxy chain for ResourceManager REST API in the Router. (Contributed by Giovanni Matteo Fumarola via curino)

2017-09-21 Thread curino
YARN-5412. Create a proxy chain for ResourceManager REST API in the Router. 
(Contributed by Giovanni Matteo Fumarola via curino)

(cherry picked from commit b6240b92abf453affc5fd64e1eedf2d29842aa75)
(cherry picked from commit acda6b96a4e92e432bd1d97fa14004a11e70387e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfd967d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfd967d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfd967d3

Branch: refs/heads/branch-2
Commit: bfd967d33866d7a3067f0b7cd107d5d45e4adf6e
Parents: 049f7c8
Author: Carlo Curino 
Authored: Thu Jul 27 14:34:45 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 17:13:28 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   24 +
 .../hadoop/yarn/webapp/util/WebAppUtils.java|   14 +
 .../src/main/resources/yarn-default.xml |   30 +
 .../resourcemanager/webapp/RMWSConsts.java  |   15 +
 .../resourcemanager/webapp/RMWebAppUtil.java|   29 +
 .../webapp/RMWebServiceProtocol.java|  133 +-
 .../resourcemanager/webapp/RMWebServices.java   |4 +-
 .../webapp/dao/AppAttemptInfo.java  |5 +-
 .../TestFederationRMStateStoreService.java  |9 +-
 .../hadoop-yarn-server-router/pom.xml   |   34 +-
 .../hadoop/yarn/server/router/Router.java   |   35 +
 .../webapp/AbstractRESTRequestInterceptor.java  |   89 ++
 .../webapp/DefaultRequestInterceptorREST.java   |  496 +++
 .../yarn/server/router/webapp/HTTPMethods.java  |   34 +
 .../router/webapp/RESTRequestInterceptor.java   |  125 ++
 .../yarn/server/router/webapp/RouterWebApp.java |   48 +
 .../router/webapp/RouterWebServiceUtil.java |  227 +++
 .../server/router/webapp/RouterWebServices.java |  876 
 .../yarn/server/router/webapp/package-info.java |   20 +
 .../webapp/BaseRouterWebServicesTest.java   |  601 
 .../yarn/server/router/webapp/JavaProcess.java  |   52 +
 .../webapp/MockRESTRequestInterceptor.java  |  340 +
 .../PassThroughRESTRequestInterceptor.java  |  339 +
 .../router/webapp/TestRouterWebServices.java|  269 
 .../webapp/TestRouterWebServicesREST.java   | 1298 ++
 .../src/test/resources/capacity-scheduler.xml   |  111 ++
 .../src/test/resources/log4j.properties |   19 +
 .../src/test/resources/yarn-site.xml|   30 +
 28 files changed, 5237 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd967d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7adfdf1..34374cf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2629,6 +2629,30 @@ public class YarnConfiguration extends Configuration {
   ROUTER_PREFIX + "submit.retry";
   public static final int DEFAULT_ROUTER_CLIENTRM_SUBMIT_RETRY = 3;
 
+  public static final String ROUTER_WEBAPP_PREFIX = ROUTER_PREFIX + "webapp.";
+
+  /** The address of the Router web application. */
+  public static final String ROUTER_WEBAPP_ADDRESS =
+  ROUTER_WEBAPP_PREFIX + "address";
+
+  public static final int DEFAULT_ROUTER_WEBAPP_PORT = 8089;
+  public static final String DEFAULT_ROUTER_WEBAPP_ADDRESS =
+  "0.0.0.0:" + DEFAULT_ROUTER_WEBAPP_PORT;
+
+  /** The https address of the Router web application. */
+  public static final String ROUTER_WEBAPP_HTTPS_ADDRESS =
+  ROUTER_WEBAPP_PREFIX + "https.address";
+
+  public static final int DEFAULT_ROUTER_WEBAPP_HTTPS_PORT = 8091;
+  public static final String DEFAULT_ROUTER_WEBAPP_HTTPS_ADDRESS =
+  "0.0.0.0:" + DEFAULT_ROUTER_WEBAPP_HTTPS_PORT;
+
+  public static final String ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE =
+  ROUTER_WEBAPP_PREFIX + "interceptor-class.pipeline";
+  public static final String DEFAULT_ROUTER_WEBAPP_INTERCEPTOR_CLASS =
+  "org.apache.hadoop.yarn.server.router.webapp."
+  + "DefaultRequestInterceptorREST";
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd967d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java

[48/50] [abbrv] hadoop git commit: YARN-6853. Add MySql Scripts for FederationStateStore. (Contributed by Giovanni Matteo Fumarola via curino)

2017-09-21 Thread curino
YARN-6853. Add MySql Scripts for FederationStateStore. (Contributed by Giovanni 
Matteo Fumarola via curino)

(cherry picked from commit 874ddbf0b5b1d34aca70ee7fc303cbffdde67236)
(cherry picked from commit 9625a030dee1f567f3b91d74acccb8b15fe25428)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3afff76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3afff76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3afff76

Branch: refs/heads/branch-2
Commit: d3afff76034b2318550f8c4c14cbb8408b6fc187
Parents: 190b79a
Author: Carlo Curino 
Authored: Tue Aug 1 17:18:20 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 17:15:36 2017 -0700

--
 .../MySQL/FederationStateStoreDatabase.sql  |  21 +++
 .../MySQL/FederationStateStoreStoredProcs.sql   | 162 +++
 .../MySQL/FederationStateStoreTables.sql|  47 ++
 .../MySQL/FederationStateStoreUser.sql  |  25 +++
 .../FederationStateStore/MySQL/dropDatabase.sql |  21 +++
 .../MySQL/dropStoreProcedures.sql   |  47 ++
 .../FederationStateStore/MySQL/dropTables.sql   |  27 
 .../bin/FederationStateStore/MySQL/dropUser.sql |  21 +++
 .../src/site/markdown/Federation.md |  18 ++-
 9 files changed, 386 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3afff76/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql
 
b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql
new file mode 100644
index 000..68649e6
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreDatabase.sql
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Script to create a new Database in MySQL for the Federation StateStore
+
+CREATE database FederationStateStore;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3afff76/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql
 
b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql
new file mode 100644
index 000..eae882e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreStoredProcs.sql
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Script to generate all the stored procedures for the Federation StateStore 
in MySQL
+
+USE FederationStateStore
+
+DELIMITER //
+
+CREATE PROCEDURE sp_registerSubCluster(
+   IN subClusterId_IN varchar(256),
+   IN amRMServiceAddress_IN varchar(256),
+   IN clientRMServiceAddress_IN varchar(256),
+   IN rmAdminServiceAddress_IN varchar(256),
+   IN rmWebServiceAddress_IN varchar(256),
+   IN state_IN varchar(256),
+   IN lastStartTime_IN bigint, IN 

[26/50] [abbrv] hadoop git commit: YARN-5411. Create a proxy chain for ApplicationClientProtocol in the Router. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
YARN-5411. Create a proxy chain for ApplicationClientProtocol in the Router. 
(Giovanni Matteo Fumarola via Subru).

(cherry picked from commit 4846069061b6baa06da3b524b9e36567dd368388)
(cherry picked from commit 80e1904000923819c98ae68456cb0452c255d5f3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc0a2e6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc0a2e6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc0a2e6c

Branch: refs/heads/branch-2
Commit: dc0a2e6c593851342d6faf48a25bb07d4c3550cd
Parents: 3c5dfa0
Author: Subru Krishnan 
Authored: Wed May 3 18:26:15 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:43:08 2017 -0700

--
 hadoop-project/pom.xml  |   7 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  21 +
 .../hadoop/yarn/util/LRUCacheHashMap.java   |  49 ++
 .../src/main/resources/yarn-default.xml |  18 +
 .../hadoop/yarn/util/TestLRUCacheHashMap.java   |  74 +++
 .../hadoop-yarn-server-common/pom.xml   |  11 +
 .../yarn/server/MockResourceManagerFacade.java  | 513 +
 .../hadoop-yarn-server-nodemanager/pom.xml  |   7 +
 .../amrmproxy/MockRequestInterceptor.java   |  14 +-
 .../amrmproxy/MockResourceManagerFacade.java| 513 -
 .../hadoop-yarn-server-router/pom.xml   |  19 +
 .../hadoop/yarn/server/router/Router.java   |  98 +++-
 .../AbstractClientRequestInterceptor.java   |  89 +++
 .../clientrm/ClientRequestInterceptor.java  |  65 +++
 .../DefaultClientRequestInterceptor.java| 334 +++
 .../router/clientrm/RouterClientRMService.java  | 544 ++
 .../server/router/clientrm/package-info.java|  20 +
 .../hadoop/yarn/server/router/TestRouter.java   |  26 -
 .../router/clientrm/BaseRouterClientRMTest.java | 574 +++
 .../clientrm/MockClientRequestInterceptor.java  |  36 ++
 .../PassThroughClientRequestInterceptor.java| 267 +
 .../clientrm/TestRouterClientRMService.java | 210 +++
 22 files changed, 2962 insertions(+), 547 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc0a2e6c/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 152dcc7..d61dc4b 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -273,6 +273,13 @@
 
   
 org.apache.hadoop
+hadoop-yarn-server-common
+${project.version}
+test-jar
+  
+
+  
+org.apache.hadoop
  hadoop-yarn-server-tests
 ${project.version}
 test-jar

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc0a2e6c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 6fbf1a6..a3b53d6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2584,6 +2584,27 @@ public class YarnConfiguration extends Configuration {
 
   public static final int DEFAULT_FEDERATION_STATESTORE_SQL_MAXCONNECTIONS = 1;
 
+  public static final String ROUTER_PREFIX = YARN_PREFIX + "router.";
+
+  public static final String ROUTER_CLIENTRM_PREFIX =
+  ROUTER_PREFIX + "clientrm.";
+
+  public static final String ROUTER_CLIENTRM_ADDRESS =
+  ROUTER_CLIENTRM_PREFIX + ".address";
+  public static final int DEFAULT_ROUTER_CLIENTRM_PORT = 8050;
+  public static final String DEFAULT_ROUTER_CLIENTRM_ADDRESS =
+  "0.0.0.0:" + DEFAULT_ROUTER_CLIENTRM_PORT;
+
+  public static final String ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE =
+  ROUTER_CLIENTRM_PREFIX + "interceptor-class.pipeline";
+  public static final String DEFAULT_ROUTER_CLIENTRM_INTERCEPTOR_CLASS =
+  "org.apache.hadoop.yarn.server.router.clientrm."
+  + "DefaultClientRequestInterceptor";
+
+  public static final String ROUTER_CLIENTRM_PIPELINE_CACHE_MAX_SIZE =
+  ROUTER_CLIENTRM_PREFIX + "cache-max-size";
+  public static final int DEFAULT_ROUTER_CLIENTRM_PIPELINE_CACHE_MAX_SIZE = 25;
+
   
   // Other Configs
   


[13/50] [abbrv] hadoop git commit: YARN-5676. Add a HashBasedRouterPolicy, and small policies and test refactoring. (Carlo Curino via Subru).

2017-09-21 Thread curino
YARN-5676. Add a HashBasedRouterPolicy, and small policies and test 
refactoring. (Carlo Curino via Subru).

(cherry picked from commit 575137f41c27eb72d05d923337f3030a35403e8f)
(cherry picked from commit 4128c9522dcdc16bb3527f74a48ed1242458a165)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d32ffa9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d32ffa9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d32ffa9e

Branch: refs/heads/branch-2
Commit: d32ffa9e5ebb55b1fed4948f3750ce2159d235b4
Parents: d87d2b5
Author: Subru Krishnan 
Authored: Tue Nov 22 15:02:22 2016 -0800
Committer: Carlo Curino 
Committed: Thu Sep 21 16:23:45 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +-
 .../policies/AbstractPolicyManager.java | 175 -
 .../policies/FederationPolicyManager.java   | 117 
 .../PriorityBroadcastPolicyManager.java |  66 ---
 .../federation/policies/RouterPolicyFacade.java |   1 +
 .../policies/UniformBroadcastPolicyManager.java |  56 --
 .../policies/WeightedLocalityPolicyManager.java |  67 ---
 .../policies/manager/AbstractPolicyManager.java | 190 +++
 .../manager/FederationPolicyManager.java| 118 
 .../manager/HashBroadcastPolicyManager.java |  38 
 .../manager/PriorityBroadcastPolicyManager.java |  66 +++
 .../manager/UniformBroadcastPolicyManager.java  |  44 +
 .../manager/WeightedLocalityPolicyManager.java  |  67 +++
 .../policies/manager/package-info.java  |  19 ++
 .../policies/router/AbstractRouterPolicy.java   |  19 ++
 .../policies/router/HashBasedRouterPolicy.java  |  81 
 .../policies/router/LoadBasedRouterPolicy.java  |   3 +
 .../policies/router/PriorityRouterPolicy.java   |   3 +
 .../router/UniformRandomRouterPolicy.java   |  10 +-
 .../router/WeightedRandomRouterPolicy.java  |   3 +
 .../policies/BaseFederationPoliciesTest.java|  17 +-
 .../policies/BasePolicyManagerTest.java | 108 ---
 ...ionPolicyInitializationContextValidator.java |   1 +
 .../TestPriorityBroadcastPolicyManager.java |  72 ---
 .../policies/TestRouterPolicyFacade.java|   2 +
 .../TestUniformBroadcastPolicyManager.java  |  40 
 .../TestWeightedLocalityPolicyManager.java  |  79 
 .../policies/manager/BasePolicyManagerTest.java | 104 ++
 .../TestHashBasedBroadcastPolicyManager.java|  40 
 .../TestPriorityBroadcastPolicyManager.java |  72 +++
 .../TestUniformBroadcastPolicyManager.java  |  40 
 .../TestWeightedLocalityPolicyManager.java  |  79 
 .../policies/router/BaseRouterPoliciesTest.java |  51 +
 .../router/TestHashBasedRouterPolicy.java   |  83 
 .../router/TestLoadBasedRouterPolicy.java   |   3 +-
 .../router/TestPriorityRouterPolicy.java|   3 +-
 .../router/TestUniformRandomRouterPolicy.java   |   3 +-
 .../router/TestWeightedRandomRouterPolicy.java  |  15 +-
 38 files changed, 1160 insertions(+), 798 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d32ffa9e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 055428d..906d632 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2547,7 +2547,8 @@ public class YarnConfiguration extends Configuration {
   + "policy-manager";
 
   public static final String DEFAULT_FEDERATION_POLICY_MANAGER = "org.apache"
-  + 
".hadoop.yarn.server.federation.policies.UniformBroadcastPolicyManager";
+  + ".hadoop.yarn.server.federation.policies"
+  + ".manager.UniformBroadcastPolicyManager";
 
   public static final String FEDERATION_POLICY_MANAGER_PARAMS =
   FEDERATION_PREFIX + "policy-manager-params";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d32ffa9e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/AbstractPolicyManager.java
--
diff --git 

[29/50] [abbrv] hadoop git commit: YARN-5531. UnmanagedAM pool manager for federating application across clusters. (Botong Huang via Subru).

2017-09-21 Thread curino
YARN-5531. UnmanagedAM pool manager for federating application across clusters. 
(Botong Huang via Subru).

(cherry picked from commit 73bb2102ce4b82b3a3bed91319f7c8f067ddc3e8)
(cherry picked from commit 859aa1f9d621d07693825e610bdc0149f7a2770a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9476d86c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9476d86c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9476d86c

Branch: refs/heads/branch-2
Commit: 9476d86ce869b51fc7524ae58dd53862bc2d7d72
Parents: 7f00f93
Author: Subru Krishnan 
Authored: Fri May 26 16:23:38 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:47:43 2017 -0700

--
 .../apache/hadoop/yarn/util/AsyncCallback.java  |  35 ++
 .../failover/FederationProxyProviderUtil.java   | 114 ++--
 .../yarn/server/uam/UnmanagedAMPoolManager.java | 311 ++
 .../server/uam/UnmanagedApplicationManager.java | 607 +++
 .../hadoop/yarn/server/uam/package-info.java|  18 +
 .../yarn/server/utils/AMRMClientUtils.java  | 189 ++
 .../server/utils/YarnServerSecurityUtils.java   |  41 +-
 .../yarn/server/MockResourceManagerFacade.java  |  10 +-
 .../uam/TestUnmanagedApplicationManager.java| 335 ++
 .../amrmproxy/DefaultRequestInterceptor.java|  30 +-
 .../ApplicationMasterService.java   |  12 +-
 .../TestApplicationMasterLauncher.java  |   6 +-
 12 files changed, 1590 insertions(+), 118 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9476d86c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AsyncCallback.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AsyncCallback.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AsyncCallback.java
new file mode 100644
index 000..b4f75c9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AsyncCallback.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+/**
+ * Generic interface that can be used for calling back when a corresponding
+ * asynchronous operation completes.
+ *
+ * @param  parameter type for the callback
+ */
+public interface AsyncCallback {
+  /**
+   * This method is called back when the corresponding asynchronous operation
+   * completes.
+   *
+   * @param response response of the callback
+   */
+  void callback(T response);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9476d86c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
index 18f1338..3931f2b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationProxyProviderUtil.java
@@ -19,22 +19,20 @@
 package org.apache.hadoop.yarn.server.federation.failover;
 
 import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import 

[31/50] [abbrv] hadoop git commit: YARN-6666. Fix unit test failure in TestRouterClientRMService. (Botong Huang via Subru).

2017-09-21 Thread curino
YARN-6666. Fix unit test failure in TestRouterClientRMService. (Botong Huang 
via Subru).

(cherry picked from commit e750907d0a7e2fb4b33a7c876eaa4d9122a1deea)
(cherry picked from commit 42949a6e0108ed1d92a34a5ebb547691bbf16929)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9fdc24e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9fdc24e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9fdc24e

Branch: refs/heads/branch-2
Commit: b9fdc24e43ba61ebb4b276259e6d2c13da746c04
Parents: e4832be
Author: Subru Krishnan 
Authored: Tue May 30 13:37:37 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:49:48 2017 -0700

--
 .../yarn/server/MockResourceManagerFacade.java  | 435 +--
 1 file changed, 311 insertions(+), 124 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fdc24e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
index c4a4002..4bdff64 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/MockResourceManagerFacade.java
@@ -16,19 +16,22 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
+package org.apache.hadoop.yarn.server;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
-import com.google.common.base.Strings;
-import org.apache.commons.lang.NotImplementedException;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
@@ -93,8 +96,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityReque
 import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse;
 import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest;
 import 
org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse;
-import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
-import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerResponsePBImpl;
 import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
@@ -106,33 +108,66 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NMToken;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.ReservationAllocationState;
+import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import 
org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException;
+import 
org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
+import 

[50/50] [abbrv] hadoop git commit: [YARN FEDERATION BACKPORT] Fix compilation issues due to: hadoop-router/pom.xml versions and Java 1.7

2017-09-21 Thread curino
[YARN FEDERATION BACKPORT] Fix compilation issues due to: hadoop-router/pom.xml 
versions and Java 1.7


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d11be2dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d11be2dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d11be2dc

Branch: refs/heads/branch-2
Commit: d11be2dcac8116d1b7244112e85b91955a3627a4
Parents: baa2bec
Author: Carlo Curino 
Authored: Thu Sep 21 17:56:46 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 17:56:46 2017 -0700

--
 .../LocalityMulticastAMRMProxyPolicy.java   |   3 +-
 .../yarn/server/utils/AMRMClientUtils.java  |   2 +-
 .../policies/BaseFederationPoliciesTest.java|   5 +-
 .../utils/FederationPoliciesTestUtil.java   |  21 ++-
 .../server/resourcemanager/ResourceManager.java |   4 +-
 .../hadoop-yarn-server-router/pom.xml   |   4 +-
 .../router/webapp/RouterWebServiceUtil.java |   7 +-
 .../server/router/webapp/RouterWebServices.java | 132 +++
 .../router/rmadmin/BaseRouterRMAdminTest.java   |   2 +-
 .../webapp/TestRouterWebServicesREST.java   |  14 +-
 10 files changed, 79 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d11be2dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 454962f..f50d3b0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -494,7 +494,8 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
   .checkArgument(!ResourceRequest.isAnyLocation(rr.getResourceName()));
 
   if (!countContainersPerRM.containsKey(rr.getAllocationRequestId())) {
-countContainersPerRM.put(rr.getAllocationRequestId(), new HashMap<>());
+countContainersPerRM.put(rr.getAllocationRequestId(),
+new HashMap());
   }
   if (!countContainersPerRM.get(rr.getAllocationRequestId())
   .containsKey(targetId)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d11be2dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
index 7993bd8..9f15d90 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/AMRMClientUtils.java
@@ -161,7 +161,7 @@ public final class AMRMClientUtils {
   final Token token) throws IOException {
 try {
   String rmClusterId = configuration.get(YarnConfiguration.RM_CLUSTER_ID,
-  YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
+  "yarn_cluster");
   LOG.info("Creating RMProxy to RM {} for protocol {} for user {}",
   rmClusterId, protocol.getSimpleName(), user);
   if (token != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d11be2dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/BaseFederationPoliciesTest.java
 

[04/50] [abbrv] hadoop git commit: YARN-5323. Policies APIs for Federation Router and AMRMProxy policies. (Carlo Curino via Subru).

2017-09-21 Thread curino
YARN-5323. Policies APIs for Federation Router and AMRMProxy policies. (Carlo 
Curino via Subru).

(cherry picked from commit f8208fe0b536f29aa65af71d20c3b3e3765679fd)
(cherry picked from commit 81472778d7ef013ea6b2714bc734bc6fc3ca32fa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23c42408
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23c42408
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23c42408

Branch: refs/heads/branch-2
Commit: 23c42408ba2722f8298a921614a5d2080ef8fe7b
Parents: aac8755
Author: Subru Krishnan 
Authored: Wed Sep 7 17:33:34 2016 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:23:41 2017 -0700

--
 .../policies/ConfigurableFederationPolicy.java  |  44 +++
 .../policies/FederationPolicyConfigurator.java  |  91 +
 .../FederationPolicyInitializationContext.java  | 109 
 ...ionPolicyInitializationContextValidator.java |  82 
 .../policies/FederationPolicyWriter.java|  45 +++
 .../amrmproxy/FederationAMRMProxyPolicy.java|  66 ++
 .../policies/amrmproxy/package-info.java|  20 +++
 .../exceptions/FederationPolicyException.java   |  33 +
 ...FederationPolicyInitializationException.java |  33 +
 .../NoActiveSubclustersException.java   |  27 
 .../exceptions/UnknownSubclusterException.java  |  28 
 .../policies/exceptions/package-info.java   |  20 +++
 .../federation/policies/package-info.java   |  20 +++
 .../policies/router/FederationRouterPolicy.java |  45 +++
 .../policies/router/package-info.java   |  20 +++
 ...ionPolicyInitializationContextValidator.java | 128 +++
 .../utils/FederationPoliciesTestUtil.java   |  83 
 17 files changed, 894 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23c42408/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
new file mode 100644
index 000..fd6ceea
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/ConfigurableFederationPolicy.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.federation.policies;
+
+import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
+
+/**
+ * This interface provides a general method to reinitialize a policy. The
+ * semantics are try-n-swap, so in case an exception is thrown the
+ * implementation must ensure the previous state and configuration is preserved.
+ */
+public interface ConfigurableFederationPolicy {
+
+  /**
+   * This method is invoked to initialize or update the configuration of
+   * policies. The implementor should provide try-n-swap semantics, and retain
+   * state if possible.
+   *
+   * @param federationPolicyInitializationContext the new context to provide to
+   *  implementor.
+   *
+   * @throws FederationPolicyInitializationException in case the initialization
+   * fails.
+   */
+  void reinitialize(
+  FederationPolicyInitializationContext
+  federationPolicyInitializationContext)
+  throws FederationPolicyInitializationException;
+}


[44/50] [abbrv] hadoop git commit: YARN-5412. Create a proxy chain for ResourceManager REST API in the Router. (Contributed by Giovanni Matteo Fumarola via curino)

2017-09-21 Thread curino
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd967d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
new file mode 100644
index 000..ea985a2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.router.webapp;
+
+import java.io.IOException;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Response;
+
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo;
+import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
+import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
+
+/**
+ * Mock interceptor that does not do anything other than forwarding it to the
+ * next interceptor in the chain.
+ */
+public class PassThroughRESTRequestInterceptor
+extends AbstractRESTRequestInterceptor {
+
+  @Override
+  public AppAttemptsInfo getAppAttempts(HttpServletRequest hsr, String appId) {
+return getNextInterceptor().getAppAttempts(hsr, appId);
+  }
+
+  @Override
+  public AppAttemptInfo getAppAttempt(HttpServletRequest req,
+  HttpServletResponse res, String appId, String appAttemptId) {
+return getNextInterceptor().getAppAttempt(req, res, 

[17/50] [abbrv] hadoop git commit: YARN-6190. Validation and synchronization fixes in LocalityMulticastAMRMProxyPolicy. (Botong Huang via curino)

2017-09-21 Thread curino
YARN-6190. Validation and synchronization fixes in 
LocalityMulticastAMRMProxyPolicy. (Botong Huang via curino)

(cherry picked from commit 5c486961cd3a175b122ef86275c99b72964f2c50)
(cherry picked from commit 8623644f4599f51d34ba79c4c1453b3997205d8f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6191fac9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6191fac9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6191fac9

Branch: refs/heads/branch-2
Commit: 6191fac914d35db18e0ef7a4364abe07a156c7e2
Parents: 2dca88b
Author: Carlo Curino 
Authored: Tue Feb 28 17:04:20 2017 -0800
Committer: Carlo Curino 
Committed: Thu Sep 21 16:25:08 2017 -0700

--
 .../LocalityMulticastAMRMProxyPolicy.java   | 63 +---
 .../TestLocalityMulticastAMRMProxyPolicy.java   | 21 ++-
 .../policies/manager/BasePolicyManagerTest.java |  3 -
 .../resolver/TestDefaultSubClusterResolver.java |  9 ++-
 .../utils/FederationPoliciesTestUtil.java   |  6 +-
 5 files changed, 73 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6191fac9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index 283f89e..6f97a51 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -32,6 +32,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import 
org.apache.hadoop.yarn.server.federation.policies.FederationPolicyInitializationContext;
 import 
org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
 import 
org.apache.hadoop.yarn.server.federation.policies.exceptions.FederationPolicyInitializationException;
@@ -143,10 +144,9 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 Map newWeightsConverted = new HashMap<>();
 boolean allInactive = true;
 WeightedPolicyInfo policy = getPolicyInfo();
-if (policy.getAMRMPolicyWeights() == null
-|| policy.getAMRMPolicyWeights().size() == 0) {
-  allInactive = false;
-} else {
+
+if (policy.getAMRMPolicyWeights() != null
+&& policy.getAMRMPolicyWeights().size() > 0) {
   for (Map.Entry e : policy.getAMRMPolicyWeights()
   .entrySet()) {
 if (e.getValue() > 0) {
@@ -180,7 +180,6 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 
 this.federationFacade =
 policyContext.getFederationStateStoreFacade();
-this.bookkeeper = new AllocationBookkeeper();
 this.homeSubcluster = policyContext.getHomeSubcluster();
 
   }
@@ -197,7 +196,9 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
   List resourceRequests) throws YarnException {
 
 // object used to accumulate statistics about the answer, initialize with
-// active subclusters.
+// active subclusters. Create a new instance per call because this method
+// can be called concurrently.
+bookkeeper = new AllocationBookkeeper();
 bookkeeper.reinitialize(federationFacade.getSubClusters(true));
 
 List nonLocalizedRequests =
@@ -238,12 +239,16 @@ public class LocalityMulticastAMRMProxyPolicy extends 
AbstractAMRMProxyPolicy {
 // we log altogether later
   }
   if (targetIds != null && targetIds.size() > 0) {
+boolean hasActive = false;
 for (SubClusterId tid : targetIds) {
   if (bookkeeper.isActiveAndEnabled(tid)) {
 bookkeeper.addRackRR(tid, rr);
+hasActive = true;
   }
 }
-continue;
+if (hasActive) {

[27/50] [abbrv] hadoop git commit: YARN-5413. Create a proxy chain for ResourceManager Admin API in the Router. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f00f938/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
new file mode 100644
index 000..11786e6
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
@@ -0,0 +1,219 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.router.rmadmin;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
+import 
org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
+import 
org.apache.hadoop.yarn.server.router.rmadmin.RouterRMAdminService.RequestInterceptorChainWrapper;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test class to validate the RMAdmin Service inside the Router.
+ */
+public class TestRouterRMAdminService extends BaseRouterRMAdminTest {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestRouterRMAdminService.class);
+
+  /**
+   * Tests if the pipeline is created properly.
+   */
+  @Test
+  public void testRequestInterceptorChainCreation() throws Exception {
+RMAdminRequestInterceptor root =
+super.getRouterRMAdminService().createRequestInterceptorChain();
+int index = 0;
+while (root != null) {
+  // The current pipeline is:
+  // PassThroughRMAdminRequestInterceptor - index = 0
+  // PassThroughRMAdminRequestInterceptor - index = 1
+  // PassThroughRMAdminRequestInterceptor - index = 2
+  // MockClientRequestInterceptor - index = 3
+  switch (index) {
+  case 0: // Fall to the next case
+  case 1: // Fall to the next case
+  case 2:
+// If index is equal to 0,1 or 2 we fall in this check
+Assert.assertEquals(
+PassThroughRMAdminRequestInterceptor.class.getName(),
+root.getClass().getName());
+break;
+  case 3:
+Assert.assertEquals(MockRMAdminRequestInterceptor.class.getName(),
+root.getClass().getName());
+break;
+  default:
+Assert.fail();
+  }
+  root = root.getNextInterceptor();
+  index++;
+}
+Assert.assertEquals("The number of interceptors in chain does not match", 
4,
+index);
+  }
+
+  /**
+   * Test if the RouterRMAdmin forwards all the requests to the MockRM and get
+   * back the responses.
+   */
+  @Test
+  public void testRouterRMAdminServiceE2E() throws Exception {
+
+String 

[41/50] [abbrv] hadoop git commit: Bumping up yarn-server-router (new) module pom to beta1 and fixing imports post rebase.

2017-09-21 Thread curino
Bumping up yarn-server-router (new) module pom to beta1 and fixing imports post 
rebase.

(cherry picked from commit f1eff212c5f13c62e8fc45b0af794b5bbeb577da)
(cherry picked from commit 712e97d4cfab15bec4cf4b11cc067b8f85c8bec8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58e2458c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58e2458c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58e2458c

Branch: refs/heads/branch-2
Commit: 58e2458c2e8250cf028e0fc01e06dfd9670f86be
Parents: fdbd214
Author: Subru Krishnan 
Authored: Fri Jul 14 12:02:38 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:55:41 2017 -0700

--
 .../yarn/server/resourcemanager/ApplicationMasterService.java  | 1 +
 .../hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java  | 1 +
 .../hadoop-yarn-server/hadoop-yarn-server-router/pom.xml   | 2 +-
 3 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e2458c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index aa4d620..afa005e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security
 .AMRMTokenSecretManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 import org.apache.hadoop.yarn.server.security.MasterKeyData;
+import org.apache.hadoop.yarn.server.utils.AMRMClientUtils;
 import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e2458c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index 465ff64..35340e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -48,6 +48,7 @@ import 
org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import 
org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
 import org.apache.hadoop.yarn.conf.HAUtil;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.EventHandler;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58e2458c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
index f27b2b2..78e5e59 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
@@ -24,7 +24,7 @@
   4.0.0
   org.apache.hadoop
   hadoop-yarn-server-router
-  3.0.0-alpha4-SNAPSHOT
+  3.0.0-beta1-SNAPSHOT
   

[47/50] [abbrv] hadoop git commit: YARN-6902. Update Microsoft JDBC Driver for SQL Server version in License.txt. (Botong Huang via Subru).

2017-09-21 Thread curino
YARN-6902. Update Microsoft JDBC Driver for SQL Server version in License.txt. 
(Botong Huang via Subru).

(cherry picked from commit c581e9438444966345613cb6e0585482936a783a)
(cherry picked from commit 894ff83dd6e773645ab7f0ce7dfbbaa3453df26d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/190b79af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/190b79af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/190b79af

Branch: refs/heads/branch-2
Commit: 190b79af8c07968b30bf32bdbc8ea31cee66d0dc
Parents: bfd967d
Author: Subru Krishnan 
Authored: Fri Jul 28 18:46:06 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 17:15:31 2017 -0700

--
 LICENSE.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/190b79af/LICENSE.txt
--
diff --git a/LICENSE.txt b/LICENSE.txt
index b25ec5a..52642b0 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -582,7 +582,8 @@ For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
 
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
-Microsoft SQLServer - JDBC version 6.1.0.jre7
+Apache HBase - Server which contains JQuery minified javascript library 
version 1.8.3
+Microsoft JDBC Driver for SQLServer - version 6.2.1.jre7
 

 
 Copyright jQuery Foundation and other contributors, https://jquery.org/


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/50] [abbrv] hadoop git commit: YARN-6203: Occasional test failure in TestWeightedRandomRouterPolicy (curino)

2017-09-21 Thread curino
YARN-6203: Occasional test failure in TestWeightedRandomRouterPolicy (curino)

(cherry picked from commit 121d55bd29ac44289afad9a15e8737397ede298b)
(cherry picked from commit c4bbdfeb666f1f3f2fd4c9437f10b233ebedc2c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c38ac05b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c38ac05b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c38ac05b

Branch: refs/heads/branch-2
Commit: c38ac05bb4a77b44768406fb7851e7c0b871d5a5
Parents: b9dcf92
Author: Carlo Curino 
Authored: Thu Apr 13 12:09:39 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:25:11 2017 -0700

--
 .../router/TestWeightedRandomRouterPolicy.java| 18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c38ac05b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestWeightedRandomRouterPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestWeightedRandomRouterPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestWeightedRandomRouterPolicy.java
index 34cc5f8..09173e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestWeightedRandomRouterPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/router/TestWeightedRandomRouterPolicy.java
@@ -50,8 +50,9 @@ public class TestWeightedRandomRouterPolicy extends 
BaseRouterPoliciesTest {
 Map routerWeights = new HashMap<>();
 Map amrmWeights = new HashMap<>();
 
-// simulate 20 subclusters with a 5% chance of being inactive
-for (int i = 0; i < 20; i++) {
+float numSubClusters = 20;
+// simulate N subclusters each with a 5% chance of being inactive
+for (int i = 0; i < numSubClusters; i++) {
   SubClusterIdInfo sc = new SubClusterIdInfo("sc" + i);
   // with 5% omit a subcluster
   if (getRand().nextFloat() < 0.95f) {
@@ -60,8 +61,12 @@ public class TestWeightedRandomRouterPolicy extends 
BaseRouterPoliciesTest {
 when(sci.getSubClusterId()).thenReturn(sc.toId());
 getActiveSubclusters().put(sc.toId(), sci);
   }
-  // 5% chance we omit one of the weights
-  float weight = getRand().nextFloat();
+
+  // 80% of the weight is evenly spread, 20% is randomly generated
+  float weight =
+  (0.8f * 1f / numSubClusters) + (0.2f * getRand().nextFloat());
+
+  // also 5% chance we omit one of the weights
   if (i <= 5 || getRand().nextFloat() > 0.05f) {
 routerWeights.put(sc, weight);
 amrmWeights.put(sc, weight);
@@ -89,7 +94,7 @@ public class TestWeightedRandomRouterPolicy extends 
BaseRouterPoliciesTest {
   counter.put(id.toId(), new AtomicLong(0));
 }
 
-float numberOfDraws = 10;
+float numberOfDraws = 1;
 
 for (float i = 0; i < numberOfDraws; i++) {
   SubClusterId chosenId = ((FederationRouterPolicy) getPolicy())
@@ -118,8 +123,7 @@ public class TestWeightedRandomRouterPolicy extends 
BaseRouterPoliciesTest {
 Assert.assertTrue(
 "Id " + counterEntry.getKey() + " Actual weight: " + actualWeight
 + " expected weight: " + expectedWeight,
-expectedWeight == 0 || (actualWeight / expectedWeight) < 1.2
-&& (actualWeight / expectedWeight) > 0.8);
+Math.abs(actualWeight - expectedWeight) < 0.01);
   } else {
 Assert
 .assertTrue(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[40/50] [abbrv] hadoop git commit: YARN-6821. Move FederationStateStore SQL DDL files from test resource to sbin.

2017-09-21 Thread curino
YARN-6821. Move FederationStateStore SQL DDL files from test resource to sbin.

(cherry picked from commit cd9db822f0c1efc52005b1c069d52910d88038d9)
(cherry picked from commit 3e1dc7ece0303f4d06816b8c3c1f2fae14db8eff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdbd214d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdbd214d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdbd214d

Branch: refs/heads/branch-2
Commit: fdbd214d4ac606b047e03654f7fe7fe6afa1aef3
Parents: 055138c
Author: Subru Krishnan 
Authored: Thu Jul 13 18:53:21 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:55:40 2017 -0700

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |   1 +
 .../FederationStateStoreStoreProcs.sql  | 511 +++
 .../SQLServer/FederationStateStoreTables.sql| 122 +
 .../FederationStateStoreStoreProcs.sql  | 511 ---
 .../SQLServer/FederationStateStoreTables.sql| 122 -
 5 files changed, 634 insertions(+), 633 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdbd214d/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 0080112..a3a05e4 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -56,6 +56,7 @@
 stop-yarn.sh
 start-yarn.cmd
 stop-yarn.cmd
+FederationStateStore**/**
   
   0755
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdbd214d/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
 
b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
new file mode 100644
index 000..66d6f0e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/SQLServer/FederationStateStoreStoreProcs.sql
@@ -0,0 +1,511 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+USE [FederationStateStore]
+GO
+
+IF OBJECT_ID ( '[sp_addApplicationHomeSubCluster]', 'P' ) IS NOT NULL
+DROP PROCEDURE [sp_addApplicationHomeSubCluster];
+GO
+
+CREATE PROCEDURE [dbo].[sp_addApplicationHomeSubCluster]
+@applicationId VARCHAR(64),
+@homeSubCluster VARCHAR(256),
+@storedHomeSubCluster VARCHAR(256) OUTPUT,
+@rowCount int OUTPUT
+AS BEGIN
+DECLARE @errorMessage nvarchar(4000)
+
+BEGIN TRY
+BEGIN TRAN
+-- If application to sub-cluster map doesn't exist, insert it.
+-- Otherwise don't change the current mapping.
+IF NOT EXISTS (SELECT TOP 1 *
+   FROM [dbo].[applicationsHomeSubCluster]
+   WHERE [applicationId] = @applicationId)
+
+INSERT INTO [dbo].[applicationsHomeSubCluster] (
+[applicationId],
+[homeSubCluster])
+VALUES (
+@applicationId,
+@homeSubCluster);
+-- End of the IF block
+
+SELECT @rowCount = @@ROWCOUNT;
+
+SELECT @storedHomeSubCluster = [homeSubCluster]
+FROM [dbo].[applicationsHomeSubCluster]
+WHERE [applicationId] = @applicationId;
+
+COMMIT TRAN
+END TRY
+
+BEGIN CATCH
+ROLLBACK TRAN
+
+SET @errorMessage = dbo.func_FormatErrorMessage(ERROR_MESSAGE(), 
ERROR_LINE())
+
+/*  raise error and terminate the execution */
+RAISERROR(@errorMessage, --- Error Message
+1, -- Severity
+   

[33/50] [abbrv] hadoop git commit: YARN-3666. Federation Intercepting and propagating AM- home RM communications. (Botong Huang via Subru).

2017-09-21 Thread curino
YARN-3666. Federation Intercepting and propagating AM- home RM communications. 
(Botong Huang via Subru).

(cherry picked from commit 2399eb8200609246cb623c74450ca4a2032063cc)
(cherry picked from commit bed1832c934fe4ba44efdcdc49fce06457dc3d4f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ede8c1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ede8c1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ede8c1a

Branch: refs/heads/branch-2
Commit: 7ede8c1a53e6c2fc30dfc2923929890049bb62a9
Parents: 7dd6caf
Author: Subru Krishnan 
Authored: Wed May 31 13:21:09 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:49:54 2017 -0700

--
 .../dev-support/findbugs-exclude.xml|   7 +
 .../amrmproxy/FederationInterceptor.java| 510 +++
 .../amrmproxy/TestAMRMProxyService.java |   1 +
 .../amrmproxy/TestFederationInterceptor.java| 167 ++
 .../TestableFederationInterceptor.java  | 133 +
 5 files changed, 818 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ede8c1a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 4c4298d..73f1038 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -567,4 +567,11 @@
 
   
 
+  
+  
+
+
+
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ede8c1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
new file mode 100644
index 000..5f82d69
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java
@@ -0,0 +1,510 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.amrmproxy;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import 

[30/50] [abbrv] hadoop git commit: Bumping up hadoop-yarn-server-router module to 3.0.0-alpha4 post rebase.

2017-09-21 Thread curino
Bumping up hadoop-yarn-server-router module to 3.0.0-alpha4 post rebase.

(cherry picked from commit bd9c7b1bec3293fe338553c0d5c3612e6176fb26)
(cherry picked from commit f317e0cb48c16e072aa259a0a7ed71a05274bb80)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4832bef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4832bef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4832bef

Branch: refs/heads/branch-2
Commit: e4832bef6d38894703bc3f0e6e6a9496801eeb84
Parents: 9476d86
Author: Subru Krishnan 
Authored: Fri May 26 17:10:03 2017 -0700
Committer: Carlo Curino 
Committed: Thu Sep 21 16:47:50 2017 -0700

--
 .../hadoop-yarn-server/hadoop-yarn-server-router/pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4832bef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
index 89813de..f9169e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/pom.xml
@@ -19,12 +19,12 @@
   
 hadoop-yarn-server
 org.apache.hadoop
-3.0.0-alpha3-SNAPSHOT
+3.0.0-alpha4-SNAPSHOT
   
   4.0.0
   org.apache.hadoop
   hadoop-yarn-server-router
-  3.0.0-alpha3-SNAPSHOT
+  3.0.0-alpha4-SNAPSHOT
   Apache Hadoop YARN Router
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[22/50] [abbrv] hadoop git commit: YARN-3663. Federation State and Policy Store (DBMS implementation). (Giovanni Matteo Fumarola via curino).

2017-09-21 Thread curino
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c5dfa0f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
index 80b00ef..db04592 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java
@@ -19,13 +19,14 @@ package org.apache.hadoop.yarn.server.federation.store.impl;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Calendar;
 import java.util.List;
+import java.util.TimeZone;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.federation.store.FederationStateStore;
-import 
org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreErrorCode;
 import 
org.apache.hadoop.yarn.server.federation.store.exception.FederationStateStoreException;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterRequest;
 import 
org.apache.hadoop.yarn.server.federation.store.records.AddApplicationHomeSubClusterResponse;
@@ -87,13 +88,26 @@ public abstract class FederationStateStoreBaseTest {
   @Test
   public void testRegisterSubCluster() throws Exception {
 SubClusterId subClusterId = SubClusterId.newInstance("SC");
+
 SubClusterInfo subClusterInfo = createSubClusterInfo(subClusterId);
 
+long previousTimeStamp =
+Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
+
 SubClusterRegisterResponse result = stateStore.registerSubCluster(
 SubClusterRegisterRequest.newInstance(subClusterInfo));
 
+long currentTimeStamp =
+Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
+
 Assert.assertNotNull(result);
 Assert.assertEquals(subClusterInfo, querySubClusterInfo(subClusterId));
+
+// The saved heartbeat is between the old one and the current timestamp
+Assert.assertTrue(querySubClusterInfo(subClusterId)
+.getLastHeartBeat() <= currentTimeStamp);
+Assert.assertTrue(querySubClusterInfo(subClusterId)
+.getLastHeartBeat() >= previousTimeStamp);
   }
 
   @Test
@@ -120,9 +134,7 @@ public abstract class FederationStateStoreBaseTest {
   stateStore.deregisterSubCluster(deregisterRequest);
   Assert.fail();
 } catch (FederationStateStoreException e) {
-  Assert.assertEquals(
-  FederationStateStoreErrorCode.MEMBERSHIP_UPDATE_DEREGISTER_FAIL,
-  e.getCode());
+  Assert.assertTrue(e.getMessage().startsWith("SubCluster SC not found"));
 }
   }
 
@@ -149,9 +161,8 @@ public abstract class FederationStateStoreBaseTest {
   stateStore.getSubCluster(request).getSubClusterInfo();
   Assert.fail();
 } catch (FederationStateStoreException e) {
-  Assert.assertEquals(
-  FederationStateStoreErrorCode.MEMBERSHIP_SINGLE_SELECT_FAIL,
-  e.getCode());
+  Assert.assertTrue(
+  e.getMessage().startsWith("SubCluster SC does not exist"));
 }
   }
 
@@ -200,13 +211,24 @@ public abstract class FederationStateStoreBaseTest {
 SubClusterId subClusterId = SubClusterId.newInstance("SC");
 registerSubCluster(createSubClusterInfo(subClusterId));
 
+long previousHeartBeat =
+querySubClusterInfo(subClusterId).getLastHeartBeat();
+
 SubClusterHeartbeatRequest heartbeatRequest = SubClusterHeartbeatRequest
 .newInstance(subClusterId, SubClusterState.SC_RUNNING, "capability");
 stateStore.subClusterHeartbeat(heartbeatRequest);
 
+long currentTimeStamp =
+Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
+
 Assert.assertEquals(SubClusterState.SC_RUNNING,
 querySubClusterInfo(subClusterId).getState());
-Assert.assertNotNull(querySubClusterInfo(subClusterId).getLastHeartBeat());
+
+// The saved heartbeat is between the old one and the current timestamp
+Assert.assertTrue(querySubClusterInfo(subClusterId)
+.getLastHeartBeat() <= currentTimeStamp);
+Assert.assertTrue(querySubClusterInfo(subClusterId)
+.getLastHeartBeat() >= previousHeartBeat);
   }
 
   @Test
@@ 

[25/50] [abbrv] hadoop git commit: YARN-5411. Create a proxy chain for ApplicationClientProtocol in the Router. (Giovanni Matteo Fumarola via Subru).

2017-09-21 Thread curino
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc0a2e6c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
new file mode 100644
index 000..12b933b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/DefaultClientRequestInterceptor.java
@@ -0,0 +1,334 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.router.clientrm;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
+import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
+import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
+import 

  1   2   >