hadoop git commit: YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws NPE. Contributed by Sunil G

2016-04-05 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 818d6b799 -> 552237d4a


YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws NPE. Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/552237d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/552237d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/552237d4

Branch: refs/heads/trunk
Commit: 552237d4a34ab10fa5f9ec7aad7942f2a110993e
Parents: 818d6b7
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:25:32 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:26:19 2016 +0530

--
 .../resourcemanager/recovery/TestZKRMStateStorePerf.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/552237d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
index 4b0b06a..bd25def 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
@@ -91,7 +91,9 @@ public class TestZKRMStateStorePerf extends RMStateStoreTestBase
 if (appTokenMgr != null) {
   appTokenMgr.stop();
 }
-curatorTestingServer.stop();
+if (curatorTestingServer != null) {
+  curatorTestingServer.stop();
+}
   }
 
   private void initStore(String hostPort) {
@@ -99,8 +101,9 @@ public class TestZKRMStateStorePerf extends RMStateStoreTestBase
 RMContext rmContext = mock(RMContext.class);
 
 conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_ZK_ADDRESS,
-optHostPort.or(curatorTestingServer.getConnectString()));
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, optHostPort
+.or((curatorTestingServer == null) ? "" : curatorTestingServer
+.getConnectString()));
 conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
 
 store = new ZKRMStateStore();
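
The root cause is that Guava's Optional.or(T) evaluates its argument eagerly, so the fallback expression itself must be null-safe when curatorTestingServer was never started (the real-ZooKeeper-cluster case). A minimal standalone sketch of the guard pattern, assuming Guava's com.google.common.base.Optional and using a hypothetical stub in place of Curator's TestingServer:

import com.google.common.base.Optional;

public class ZkAddressFallback {
  // Hypothetical stand-in for Curator's TestingServer.
  static class TestingServerStub {
    String getConnectString() { return "localhost:2181"; }
  }

  // Optional.or(T) evaluates its argument before checking presence,
  // so the fallback must guard against a null server itself.
  static String resolve(Optional<String> optHostPort, TestingServerStub server) {
    return optHostPort.or((server == null) ? "" : server.getConnectString());
  }

  public static void main(String[] args) {
    // Real-cluster run: host:port supplied, no embedded server started.
    System.out.println(resolve(Optional.of("zk1:2181"), null));
    // Default run: no host:port, fall back to the embedded server.
    System.out.println(resolve(Optional.<String>absent(), new TestingServerStub()));
  }
}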



hadoop git commit: YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws NPE. Contributed by Sunil G

2016-04-05 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ad08114b9 -> eec23580b


YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws NPE. Contributed by Sunil G

(cherry picked from commit 552237d4a34ab10fa5f9ec7aad7942f2a110993e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eec23580
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eec23580
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eec23580

Branch: refs/heads/branch-2
Commit: eec23580b4c5a9f5c9077e0e5c7860d99fe7f4bc
Parents: ad08114
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:25:32 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:37:31 2016 +0530

--
 .../resourcemanager/recovery/TestZKRMStateStorePerf.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eec23580/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
index 4b0b06a..bd25def 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
@@ -91,7 +91,9 @@ public class TestZKRMStateStorePerf extends RMStateStoreTestBase
 if (appTokenMgr != null) {
   appTokenMgr.stop();
 }
-curatorTestingServer.stop();
+if (curatorTestingServer != null) {
+  curatorTestingServer.stop();
+}
   }
 
   private void initStore(String hostPort) {
@@ -99,8 +101,9 @@ public class TestZKRMStateStorePerf extends RMStateStoreTestBase
 RMContext rmContext = mock(RMContext.class);
 
 conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_ZK_ADDRESS,
-optHostPort.or(curatorTestingServer.getConnectString()));
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, optHostPort
+.or((curatorTestingServer == null) ? "" : curatorTestingServer
+.getConnectString()));
 conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
 
 store = new ZKRMStateStore();



hadoop git commit: YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin A Chundatt

2016-04-05 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 552237d4a -> 776b549e2


YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/776b549e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/776b549e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/776b549e

Branch: refs/heads/trunk
Commit: 776b549e2ac20a68a5513cbcaac0edc33233dc03
Parents: 552237d
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:47:25 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:47:25 2016 +0530

--
 .../resourcemanager/webapp/NodesPage.java   | 53 +---
 .../resourcemanager/webapp/TestNodesPage.java   | 37 --
 2 files changed, 45 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/776b549e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 9603468..7063421 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_LABEL;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
@@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -101,6 +100,7 @@ class NodesPage extends RmView {
   LOG.debug("Unexpected state filter for inactive RM node");
 }
   }
+  StringBuilder nodeTableData = new StringBuilder("[\n");
   for (RMNode ni : rmNodes) {
 if (stateFilter != null) {
   NodeState state = ni.getState();
@@ -129,27 +129,40 @@ class NodesPage extends RmView {
 NodeInfo info = new NodeInfo(ni, sched);
 int usedMemory = (int) info.getUsedMemory();
 int availableMemory = (int) info.getAvailableMemory();
-TR<TBODY<TABLE<Hamlet>>> row =
-tbody.tr().td(StringUtils.join(",", info.getNodeLabels()))
-.td(info.getRack()).td(info.getState()).td(info.getNodeId());
+nodeTableData.append("[\"")
+.append(StringUtils.join(",", info.getNodeLabels())).append("\",\"")
+.append(info.getRack()).append("\",\"").append(info.getState())
+.append("\",\"").append(info.getNodeId());
 if (isInactive) {
-  row.td()._("N/A")._();
+  nodeTableData.append("\",\"").append("N/A").append("\",\"");
 } else {
   String httpAddress = info.getNodeHTTPAddress();
-  row.td().a("//" + httpAddress, httpAddress)._();
+  nodeTableData.append("\",\"").append(httpAddress).append("\",").append("\"");
 }
-row.td().br().$title(String.valueOf(info.getLastHealthUpdate()))._()
-._(Times.format(info.getLastHealthUpdate()))._()
-.td(info.getHealthReport())
-.td(String.valueOf(info.getNumContainers())).td().br()
-.$title(String.valueOf(usedMemory))._()
-._(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().td().br()
-.$title(String.valueOf(availableMemory))._()
-._(StringUtils.byteDesc(availableMemory * BYTES_IN_MB))._()
-.td(String.valueOf(info.getUsedVirtualCores()))
-.td(String.valueOf(info.getAvailableVirtualCores()))
-.td(ni.getNodeManagerVersion())._();
+nodeTableData.append("")
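
The change replaces per-node Hamlet DOM construction with a single string of table data that the DataTables widget renders client-side, which is what makes the page load fast for large clusters. A minimal standalone sketch of that technique; NodeRow is a hypothetical stand-in for the NodeInfo fields the page reads:

import java.util.Arrays;
import java.util.List;

public class NodeTableData {
  // Hypothetical stand-in for the per-node fields rendered in each row.
  static class NodeRow {
    final String labels, rack, state, nodeId;
    NodeRow(String labels, String rack, String state, String nodeId) {
      this.labels = labels; this.rack = rack;
      this.state = state; this.nodeId = nodeId;
    }
  }

  // Build one JavaScript array literal instead of one DOM row per node.
  static String render(List<NodeRow> nodes) {
    StringBuilder data = new StringBuilder("[\n");
    for (int i = 0; i < nodes.size(); i++) {
      NodeRow n = nodes.get(i);
      data.append("[\"").append(n.labels).append("\",\"")
          .append(n.rack).append("\",\"")
          .append(n.state).append("\",\"")
          .append(n.nodeId).append("\"]")
          .append(i < nodes.size() - 1 ? ",\n" : "\n");
    }
    return data.append("]").toString();
  }

  public static void main(String[] args) {
    System.out.println(render(Arrays.asList(
        new NodeRow("gpu", "/default-rack", "RUNNING", "host1:8041"),
        new NodeRow("", "/default-rack", "RUNNING", "host2:8041"))));
  }
}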
+  

hadoop git commit: YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin A Chundatt

2016-04-05 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eec23580b -> 13a4e25f2


YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin A Chundatt

(cherry picked from commit 776b549e2ac20a68a5513cbcaac0edc33233dc03)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13a4e25f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13a4e25f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13a4e25f

Branch: refs/heads/branch-2
Commit: 13a4e25f26e22f65f60ce14011eb20b9def7bee0
Parents: eec2358
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:47:25 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:53:24 2016 +0530

--
 .../resourcemanager/webapp/NodesPage.java   | 53 +---
 .../resourcemanager/webapp/TestNodesPage.java   | 37 --
 2 files changed, 45 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13a4e25f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 9603468..7063421 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_LABEL;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
@@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -101,6 +100,7 @@ class NodesPage extends RmView {
   LOG.debug("Unexpected state filter for inactive RM node");
 }
   }
+  StringBuilder nodeTableData = new StringBuilder("[\n");
   for (RMNode ni : rmNodes) {
 if (stateFilter != null) {
   NodeState state = ni.getState();
@@ -129,27 +129,40 @@ class NodesPage extends RmView {
 NodeInfo info = new NodeInfo(ni, sched);
 int usedMemory = (int) info.getUsedMemory();
 int availableMemory = (int) info.getAvailableMemory();
-TR<TBODY<TABLE<Hamlet>>> row =
-tbody.tr().td(StringUtils.join(",", info.getNodeLabels()))
-.td(info.getRack()).td(info.getState()).td(info.getNodeId());
+nodeTableData.append("[\"")
+.append(StringUtils.join(",", info.getNodeLabels())).append("\",\"")
+.append(info.getRack()).append("\",\"").append(info.getState())
+.append("\",\"").append(info.getNodeId());
 if (isInactive) {
-  row.td()._("N/A")._();
+  nodeTableData.append("\",\"").append("N/A").append("\",\"");
 } else {
   String httpAddress = info.getNodeHTTPAddress();
-  row.td().a("//" + httpAddress, httpAddress)._();
+  nodeTableData.append("\",\"").append(httpAddress).append("\",").append("\"");
 }
-row.td().br().$title(String.valueOf(info.getLastHealthUpdate()))._()
-._(Times.format(info.getLastHealthUpdate()))._()
-.td(info.getHealthReport())
-.td(String.valueOf(info.getNumContainers())).td().br()
-.$title(String.valueOf(usedMemory))._()
-._(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().td().br()
-.$title(String.valueOf(availableMemory))._()
-._(StringUtils.byteDesc(availableMemory * BYTES_IN_MB))._()
-.td(String.valueOf(info.getUsedVirtualCores()))
-.td(String.valueOf(info.getAvailableVirtualCores()))
-.t

hadoop git commit: YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla

2016-04-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 776b549e2 -> 1cbcd4a49


YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cbcd4a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cbcd4a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cbcd4a4

Branch: refs/heads/trunk
Commit: 1cbcd4a491e6a57d466c2897335614dc6770b475
Parents: 776b549
Author: Jason Lowe 
Authored: Tue Apr 5 13:40:19 2016 +
Committer: Jason Lowe 
Committed: Tue Apr 5 13:40:19 2016 +

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   9 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +
 .../src/main/resources/yarn-default.xml |  13 ++
 .../resourcemanager/NodesListManager.java   | 104 -
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../resourcemanager/ResourceTrackerService.java |   8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   4 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  22 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   9 +
 .../TestResourceTrackerService.java | 216 +--
 .../webapp/TestRMWebServicesNodes.java  |  12 +-
 12 files changed, 387 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 92d586b..951f5a8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -199,6 +199,15 @@ public class NodeInfo {
 public ResourceUtilization getNodeUtilization() {
   return null;
 }
+
+@Override
+public long getUntrackedTimeStamp() {
+  return 0;
+}
+
+@Override
+public void setUntrackedTimeStamp(long timeStamp) {
+}
   }
 
   public static RMNode newNodeInfo(String rackName, String hostName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 2e9cccb..e5013c4 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -188,4 +188,13 @@ public class RMNodeWrapper implements RMNode {
   public ResourceUtilization getNodeUtilization() {
 return node.getNodeUtilization();
   }
+
+  @Override
+  public long getUntrackedTimeStamp() {
+return 0;
+  }
+
+  @Override
+  public void setUntrackedTimeStamp(long timeStamp) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acee57..66b293f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -648,6 +648,15 @@ public class YarnConfiguration extends Configuration {
   "NONE";
 
   /**
   * Timeout(msec) for an untracked node to remain in shutdown or decommissioned
+   * state.
+   */
+  public static final String RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC =
+  RM_PREFIX + "node-removal-untracked.timeout-ms";
+  public static final int
  DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC = 60000;
+
+  /**
* RM proxy users' prefix
*/
   public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-y
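
For readers outside the patch context, a minimal sketch of how a caller would pick up the new key added above, using the standard Configuration.getInt lookup; the default compiled in by the patch is 60000 ms:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class UntrackedTimeout {
  // Read yarn.resourcemanager.node-removal-untracked.timeout-ms,
  // falling back to the compiled-in default.
  static int untrackedRemovalTimeoutMs(Configuration conf) {
    return conf.getInt(
        YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC,
        YarnConfiguration.DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
  }

  public static void main(String[] args) {
    System.out.println(untrackedRemovalTimeoutMs(new YarnConfiguration()));
  }
}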

hadoop git commit: YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla (cherry picked from commit 1cbcd4a491e6a57d4

2016-04-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 13a4e25f2 -> 814ceeb48


YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla
(cherry picked from commit 1cbcd4a491e6a57d466c2897335614dc6770b475)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/814ceeb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/814ceeb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/814ceeb4

Branch: refs/heads/branch-2
Commit: 814ceeb4893ad9ed903ebcfd30fe3158ec1c71df
Parents: 13a4e25
Author: Jason Lowe 
Authored: Tue Apr 5 13:40:19 2016 +
Committer: Jason Lowe 
Committed: Tue Apr 5 13:41:18 2016 +

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   9 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +
 .../src/main/resources/yarn-default.xml |  13 ++
 .../resourcemanager/NodesListManager.java   | 104 -
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../resourcemanager/ResourceTrackerService.java |   8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   4 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  22 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   9 +
 .../TestResourceTrackerService.java | 216 +--
 .../webapp/TestRMWebServicesNodes.java  |  12 +-
 12 files changed, 387 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/814ceeb4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 92d586b..951f5a8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -199,6 +199,15 @@ public class NodeInfo {
 public ResourceUtilization getNodeUtilization() {
   return null;
 }
+
+@Override
+public long getUntrackedTimeStamp() {
+  return 0;
+}
+
+@Override
+public void setUntrackedTimeStamp(long timeStamp) {
+}
   }
 
   public static RMNode newNodeInfo(String rackName, String hostName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/814ceeb4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 2e9cccb..e5013c4 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -188,4 +188,13 @@ public class RMNodeWrapper implements RMNode {
   public ResourceUtilization getNodeUtilization() {
 return node.getNodeUtilization();
   }
+
+  @Override
+  public long getUntrackedTimeStamp() {
+return 0;
+  }
+
+  @Override
+  public void setUntrackedTimeStamp(long timeStamp) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/814ceeb4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index e7d16d7..bceefe2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -648,6 +648,15 @@ public class YarnConfiguration extends Configuration {
   "NONE";
 
   /**
   * Timeout(msec) for an untracked node to remain in shutdown or decommissioned
+   * state.
+   */
+  public static final String RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC =
+  RM_PREFIX + "node-removal-untracked.timeout-ms";
+  public static final int
  DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC = 60000;
+
+  /**
* RM proxy users' prefix
*/
   public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyus

hadoop git commit: YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla (cherry picked from commit 1cbcd4a491e6a57d4

2016-04-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 3deeb9a15 -> 9d3c51eb5


YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla
(cherry picked from commit 1cbcd4a491e6a57d466c2897335614dc6770b475)

Conflicts:


hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d3c51eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d3c51eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d3c51eb

Branch: refs/heads/branch-2.8
Commit: 9d3c51eb5a701ae4ba296e2fd3fba154fa0097c0
Parents: 3deeb9a
Author: Jason Lowe 
Authored: Tue Apr 5 13:47:05 2016 +
Committer: Jason Lowe 
Committed: Tue Apr 5 13:47:05 2016 +

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   9 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +
 .../src/main/resources/yarn-default.xml |  13 ++
 .../resourcemanager/NodesListManager.java   | 104 -
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../resourcemanager/ResourceTrackerService.java |   8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   4 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  22 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   9 +
 .../TestResourceTrackerService.java | 216 +--
 .../webapp/TestRMWebServicesNodes.java  |  12 +-
 12 files changed, 387 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c51eb/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 92d586b..951f5a8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -199,6 +199,15 @@ public class NodeInfo {
 public ResourceUtilization getNodeUtilization() {
   return null;
 }
+
+@Override
+public long getUntrackedTimeStamp() {
+  return 0;
+}
+
+@Override
+public void setUntrackedTimeStamp(long timeStamp) {
+}
   }
 
   public static RMNode newNodeInfo(String rackName, String hostName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c51eb/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 2e9cccb..e5013c4 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -188,4 +188,13 @@ public class RMNodeWrapper implements RMNode {
   public ResourceUtilization getNodeUtilization() {
 return node.getNodeUtilization();
   }
+
+  @Override
+  public long getUntrackedTimeStamp() {
+return 0;
+  }
+
+  @Override
+  public void setUntrackedTimeStamp(long timeStamp) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d3c51eb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8018d1c..f907104 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -639,6 +639,15 @@ public class YarnConfiguration extends Configuration {
   "NONE";
 
   /**
   * Timeout(msec) for an untracked node to remain in shutdown or decommissioned
+   * state.
+   */
+  public static final String RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC =
+  RM_PREFIX + "node-removal-untracked.timeout-ms";
+  public static final int
  DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC = 60000;
+
+  

hadoop git commit: YARN-4773. Log aggregation performs extraneous filesystem operations when rolling log aggregation is disabled. Contributed by Jun Gong

2016-04-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 fc0ffc48d -> 49428ab6b


YARN-4773. Log aggregation performs extraneous filesystem operations when rolling log aggregation is disabled. Contributed by Jun Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49428ab6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49428ab6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49428ab6

Branch: refs/heads/branch-2.7
Commit: 49428ab6bb25c54755b841c720f3d8ab53313544
Parents: fc0ffc4
Author: Jason Lowe 
Authored: Tue Apr 5 13:51:46 2016 +
Committer: Jason Lowe 
Committed: Tue Apr 5 13:51:46 2016 +

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../containermanager/logaggregation/AppLogAggregatorImpl.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49428ab6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d26d1d3..28677af 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -115,6 +115,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4183. Clarify the behavior of timeline service config properties
 (Naganarasimha G R via sjlee)
 
+YARN-4773. Log aggregation performs extraneous filesystem operations when
+rolling log aggregation is disabled (Jun Gong via jlowe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49428ab6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 0de4288..6cf9a78 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -284,7 +284,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
 
   // Before upload logs, make sure the number of existing logs
   // is smaller than the configured NM log aggregation retention size.
-  if (uploadedLogsInThisCycle) {
+  if (uploadedLogsInThisCycle && logAggregationInRolling) {
 cleanOldLogs();
   }
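
The one-line fix is the whole story: cleanOldLogs() lists remote aggregated-log files to enforce the rolling retention size, so it only pays off when rolling aggregation is enabled. A standalone sketch of the guard; the class around it is a stand-in, not the NodeManager code:

public class LogCycleGuard {
  private boolean uploadedLogsInThisCycle;
  private boolean logAggregationInRolling;

  void onCycleEnd() {
    // Retention cleanup lists existing aggregated logs on the remote
    // filesystem; skip it entirely when rolling aggregation is disabled,
    // since a one-shot upload has no earlier cycles to trim.
    if (uploadedLogsInThisCycle && logAggregationInRolling) {
      cleanOldLogs();
    }
  }

  private void cleanOldLogs() {
    // would list remote logs and delete the oldest beyond the
    // configured NM retention size
  }
}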
 



hadoop git commit: YARN-4893. Fix some intermittent test failures in TestRMAdminService. Contributed by Brahma Reddy Battula.

2016-04-05 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1cbcd4a49 -> 6be28bcc4


YARN-4893. Fix some intermittent test failures in TestRMAdminService. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6be28bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6be28bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6be28bcc

Branch: refs/heads/trunk
Commit: 6be28bcc461292b24589dae17a235b3eaadc07ed
Parents: 1cbcd4a
Author: Junping Du 
Authored: Tue Apr 5 06:57:26 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 06:57:54 2016 -0700

--
 .../org/apache/hadoop/yarn/server/resourcemanager/MockRM.java | 7 +--
 .../yarn/server/resourcemanager/TestRMAdminService.java   | 3 ---
 .../hadoop/yarn/server/resourcemanager/TestRMRestart.java | 2 --
 .../server/resourcemanager/TestResourceTrackerService.java| 6 --
 .../server/resourcemanager/rmapp/TestNodesListManager.java| 5 ++---
 5 files changed, 7 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be28bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index d5b64c1..25c558f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -56,12 +56,12 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -603,6 +603,7 @@ public class MockRM extends ResourceManager {
   public MockNM registerNode(String nodeIdStr, int memory) throws Exception {
 MockNM nm = new MockNM(nodeIdStr, memory, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
 
@@ -611,6 +612,7 @@ public class MockRM extends ResourceManager {
 MockNM nm =
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
   
@@ -620,6 +622,7 @@ public class MockRM extends ResourceManager {
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService(),
 YarnVersionInfo.getVersion());
 nm.registerNode(runningApplications);
+drainEvents();
 return nm;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be28bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index 4513cbb..5c69411 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -27,9 +27,7 @@ import java.io.FileOutputStream;
 import j
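
The drainEvents() calls are the substance of the fix: node registration is handled on an asynchronous dispatcher thread, so a test asserting on RM state immediately after registerNode() can race it. A minimal sketch of the idea; DrainableDispatcher is a hypothetical, deliberately simplified stand-in for the DrainDispatcher that MockRM uses:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DrainableDispatcher {
  private final BlockingQueue<Runnable> events = new LinkedBlockingQueue<Runnable>();

  // Events are normally consumed by a background thread; this test-only
  // drain() empties the queue synchronously so assertions cannot race it.
  void dispatch(Runnable event) { events.add(event); }

  void drain() {
    Runnable e;
    while ((e = events.poll()) != null) {
      e.run();
    }
  }

  public static void main(String[] args) {
    DrainableDispatcher d = new DrainableDispatcher();
    d.dispatch(new Runnable() {
      public void run() { System.out.println("NODE_ADDED handled"); }
    });
    d.drain(); // now safe to assert on post-registration RM state
  }
}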

hadoop git commit: YARN-4773. Log aggregation performs extraneous filesystem operations when rolling log aggregation is disabled. Contributed by Jun Gong

2016-04-05 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 173681a47 -> 95b870096


YARN-4773. Log aggregation performs extraneous filesystem operations when rolling log aggregation is disabled. Contributed by Jun Gong


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95b87009
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95b87009
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95b87009

Branch: refs/heads/branch-2.6
Commit: 95b8700969c28891c914fda982a9950153553b58
Parents: 173681a
Author: Jason Lowe 
Authored: Tue Apr 5 14:05:11 2016 +
Committer: Jason Lowe 
Committed: Tue Apr 5 14:05:11 2016 +

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../containermanager/logaggregation/AppLogAggregatorImpl.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95b87009/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bccb30d..fb6ca52 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -27,6 +27,9 @@ Release 2.6.5 - UNRELEASED
 YARN-4785. inconsistent value type of the "type" field for LeafQueueInfo in
 response of RM REST API. (Varun Vasudev via junping-du)
 
+YARN-4773. Log aggregation performs extraneous filesystem operations when
+rolling log aggregation is disabled (Jun Gong via jlowe)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95b87009/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index a0f3c48..a43cc91 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -282,7 +282,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
 
   // Before upload logs, make sure the number of existing logs
   // is smaller than the configured NM log aggregation retention size.
-  if (uploadedLogsInThisCycle) {
+  if (uploadedLogsInThisCycle && logAggregationInRolling) {
 cleanOldLogs();
   }
 



hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eeff2e35f -> ef3da8235


HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef3da823
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef3da823
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef3da823

Branch: refs/heads/branch-2
Commit: ef3da823573cbf16fd1d84479330dd457f95e0ff
Parents: eeff2e3
Author: Kihwal Lee 
Authored: Tue Apr 5 09:07:24 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:16:05 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 33 +++-
 2 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3da823/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 1c7316a..20cecb4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
protected void processPath(PathData src, PathData target) throws IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3da823/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index b396762..a9791b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -521,7 +521,38 @@ public class TestDFSShell {
   }
 }
   }
-  
+
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
  @Test (timeout = 30000)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();
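
The patch relaxes the equality test from the full URI to scheme plus host, so a source with an explicit port and a target on the default port are treated as the same filesystem. A minimal standalone sketch of that comparison on java.net.URI (note that a null host renders as the string "null" on both sides, matching the patch's string concatenation):

import java.net.URI;

public class FsUriMatch {
  // Two filesystem URIs count as the same filesystem when scheme
  // and host match; the port is deliberately ignored.
  static boolean sameFileSystem(URI src, URI dst) {
    String srcKey = src.getScheme() + "://" + src.getHost();
    String dstKey = dst.getScheme() + "://" + dst.getHost();
    return srcKey.equals(dstKey);
  }

  public static void main(String[] args) {
    System.out.println(sameFileSystem(
        URI.create("hdfs://localhost:8020"), URI.create("hdfs://localhost"))); // true
    System.out.println(sameFileSystem(
        URI.create("hdfs://nn1"), URI.create("hdfs://nn2"))); // false
  }
}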



hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 9d3c51eb5 -> f9764d073


HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

(cherry picked from commit ef3da823573cbf16fd1d84479330dd457f95e0ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9764d07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9764d07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9764d07

Branch: refs/heads/branch-2.8
Commit: f9764d073fd8bc70ae63614f166aeac0325d6a4d
Parents: 9d3c51e
Author: Kihwal Lee 
Authored: Tue Apr 5 09:16:51 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:16:51 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 33 +++-
 2 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9764d07/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 1c7316a..20cecb4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
protected void processPath(PathData src, PathData target) throws IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9764d07/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index b396762..a9791b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -521,7 +521,38 @@ public class TestDFSShell {
   }
 }
   }
-  
+
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
  @Test (timeout = 30000)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();



hadoop git commit: YARN-4893. Fix some intermittent test failures in TestRMAdminService. Contributed by Brahma Reddy Battula. (cherry picked from commit 6be28bcc461292b24589dae17a235b3eaadc07ed)

2016-04-05 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 814ceeb48 -> eeff2e35f


YARN-4893. Fix some intermittent test failures in TestRMAdminService. Contributed by Brahma Reddy Battula.
(cherry picked from commit 6be28bcc461292b24589dae17a235b3eaadc07ed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eeff2e35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eeff2e35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eeff2e35

Branch: refs/heads/branch-2
Commit: eeff2e35f8a2b35568e57846b66f602845c3bbe1
Parents: 814ceeb
Author: Junping Du 
Authored: Tue Apr 5 06:57:26 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 07:05:06 2016 -0700

--
 .../org/apache/hadoop/yarn/server/resourcemanager/MockRM.java | 7 +--
 .../yarn/server/resourcemanager/TestRMAdminService.java   | 3 ---
 .../hadoop/yarn/server/resourcemanager/TestRMRestart.java | 2 --
 .../server/resourcemanager/TestResourceTrackerService.java| 6 --
 .../server/resourcemanager/rmapp/TestNodesListManager.java| 5 ++---
 5 files changed, 7 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eeff2e35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index d5b64c1..25c558f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -56,12 +56,12 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -603,6 +603,7 @@ public class MockRM extends ResourceManager {
   public MockNM registerNode(String nodeIdStr, int memory) throws Exception {
 MockNM nm = new MockNM(nodeIdStr, memory, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
 
@@ -611,6 +612,7 @@ public class MockRM extends ResourceManager {
 MockNM nm =
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
   
@@ -620,6 +622,7 @@ public class MockRM extends ResourceManager {
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService(),
 YarnVersionInfo.getVersion());
 nm.registerNode(runningApplications);
+drainEvents();
 return nm;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eeff2e35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index 4513cbb..5c69411 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdm

hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 49428ab6b -> 960860133


HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

(cherry picked from commit ef3da823573cbf16fd1d84479330dd457f95e0ff)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96086013
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96086013
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96086013

Branch: refs/heads/branch-2.7
Commit: 9608601330520990d4470971389d574cfa09736e
Parents: 49428ab
Author: Kihwal Lee 
Authored: Tue Apr 5 09:17:57 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:20:38 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 33 +++-
 3 files changed, 40 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96086013/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 1c7316a..20cecb4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
protected void processPath(PathData src, PathData target) throws IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96086013/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3a77f02..bb37eb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -158,6 +158,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9917. IBR accumulate more objects when SNN was down for sometime.
 (Brahma Reddy Battula via vinayakumarb)
 
+HDFS-10239. Fsshell mv fails if port usage doesn't match in src and
+destination paths (Kuhu Shukla via kihwal)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96086013/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 828d89d..9924775 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -519,7 +519,38 @@ public class TestDFSShell {
   }
 }
   }
-  
+
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
  @Test (timeout = 30000)
   public void testURIPaths() throws Exception {
Configuration srcConf = new HdfsConfiguration();

hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6be28bcc4 -> 917464505


HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination 
paths. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91746450
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91746450
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91746450

Branch: refs/heads/trunk
Commit: 917464505c0e930ebeb4c775d829e51c56a48686
Parents: 6be28bc
Author: Kihwal Lee 
Authored: Tue Apr 5 09:07:24 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:07:24 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 31 
 2 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 02a3b25..d359282 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
 protected void processPath(PathData src, PathData target) throws 
IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 41cd5c0..b75ac11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -559,6 +559,37 @@ public class TestDFSShell {
 }
   }
 
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
  @Test (timeout = 30000)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();



[2/2] hadoop git commit: YARN-4893. Fix some intermittent test failures in TestRMAdminService. Contributed by Brahma Reddy Battula. (cherry picked from commit 6be28bcc461292b24589dae17a235b3eaadc07ed)

2016-04-05 Thread junping_du
YARN-4893. Fix some intermittent test failures in TestRMAdminService. 
Contributed by Brahma Reddy Battula.
(cherry picked from commit 6be28bcc461292b24589dae17a235b3eaadc07ed)
(cherry picked from commit eeff2e35f8a2b35568e57846b66f602845c3bbe1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c2dc1a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c2dc1a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c2dc1a1

Branch: refs/heads/branch-2.8
Commit: 9c2dc1a194b44490a62b8c501b23fb05a8abe05f
Parents: f9764d0
Author: Junping Du 
Authored: Tue Apr 5 06:57:26 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 08:41:48 2016 -0700

--
 .../org/apache/hadoop/yarn/server/resourcemanager/MockRM.java | 7 +--
 .../yarn/server/resourcemanager/TestRMAdminService.java   | 3 ---
 .../hadoop/yarn/server/resourcemanager/TestRMRestart.java | 2 --
 .../server/resourcemanager/TestResourceTrackerService.java| 6 --
 .../server/resourcemanager/rmapp/TestNodesListManager.java| 5 ++---
 5 files changed, 7 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c2dc1a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index e0da263..b8d64e9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -56,12 +56,12 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -585,6 +585,7 @@ public class MockRM extends ResourceManager {
   public MockNM registerNode(String nodeIdStr, int memory) throws Exception {
 MockNM nm = new MockNM(nodeIdStr, memory, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
 
@@ -593,6 +594,7 @@ public class MockRM extends ResourceManager {
 MockNM nm =
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
   
@@ -602,6 +604,7 @@ public class MockRM extends ResourceManager {
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService(),
 YarnVersionInfo.getVersion());
 nm.registerNode(runningApplications);
+drainEvents();
 return nm;
   }
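
The drainEvents() calls added after each nm.registerNode() make node registration synchronous from the test's point of view: the RM handles registrations through its asynchronous dispatcher, so an assertion issued immediately after registerNode() could race the NODE_ADDED event and fail intermittently. A sketch of the pattern this enables, assuming the MockRM/MockNM test harness shown in the diff (illustrative, not part of the patch):

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.junit.Assert;
import org.junit.Test;

public class DrainEventsSketch {
  @Test
  public void nodeVisibleImmediatelyAfterRegistration() throws Exception {
    MockRM rm = new MockRM(new YarnConfiguration());
    rm.start();
    // registerNode() now drains the dispatcher before returning, so the
    // NODE_ADDED event has been fully processed by the time we assert.
    MockNM nm = rm.registerNode("127.0.0.1:1234", 8192);
    Assert.assertEquals(1, rm.getRMContext().getRMNodes().size());
    rm.stop();
  }
}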
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c2dc1a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index 4513cbb..5c69411 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java

[1/2] hadoop git commit: Small addendum to YARN-4893 to fix build failure.

2016-04-05 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 f9764d073 -> 3df56a97d


Small addendum to YARN-4893 to fix build failure.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3df56a97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3df56a97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3df56a97

Branch: refs/heads/branch-2.8
Commit: 3df56a97dfb25f09f8be944348ece0bb2bef6d19
Parents: 9c2dc1a
Author: Junping Du 
Authored: Tue Apr 5 08:41:19 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 08:41:48 2016 -0700

--
 .../yarn/server/resourcemanager/rmapp/TestNodesListManager.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df56a97/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
index 7ef8549..407ac85 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestNodesListManager.java
@@ -43,6 +43,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.util.ControlledClock;
+import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;



hadoop git commit: YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss.

2016-04-05 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk 917464505 -> 000581674


YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00058167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00058167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00058167

Branch: refs/heads/trunk
Commit: 00058167431475c6e63c80207424f1d365569e3a
Parents: 9174645
Author: Junping Du 
Authored: Tue Apr 5 09:01:08 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 09:01:08 2016 -0700

--
 .../yarn/server/nodemanager/containermanager/TestNMProxy.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00058167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 7ce15c5..46b32de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -188,8 +188,7 @@ public class TestNMProxy extends BaseContainerManagerTest {
   Assert.fail("should get socket exception");
 } catch (IOException e) {
   // socket exception should be thrown immediately, without RPC retries.
-  Assert.assertTrue(e.toString().
-  contains("Failed on local exception: java.net.SocketException"));
+  Assert.assertTrue(e instanceof java.net.SocketException);
 }
   }
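
The one-line change swaps a brittle message-substring assertion for a type check. The exact wording "Failed on local exception: ..." depends on how the RPC layer wraps the error, which can shift between JDK and Hadoop versions; the thrown type is the contract the test actually cares about. A self-contained illustration (hypothetical test class, not from the patch):

import java.io.IOException;
import java.net.SocketException;
import org.junit.Assert;
import org.junit.Test;

public class ExceptionAssertSketch {
  @Test
  public void typeCheckSurvivesRewording() {
    try {
      throw new SocketException("Connection refused");
    } catch (IOException e) {
      // A contains("Failed on local exception: ...") check would tie the
      // test to exact message text; the instanceof check depends only on
      // what is actually thrown.
      Assert.assertTrue(e instanceof SocketException);
    }
  }
}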
 



hadoop git commit: YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss. (cherry picked from commit 00058167431475c6e63c80207424f1d365569e3a)

2016-04-05 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ef3da8235 -> 0907ce8c9


YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss.
(cherry picked from commit 00058167431475c6e63c80207424f1d365569e3a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0907ce8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0907ce8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0907ce8c

Branch: refs/heads/branch-2
Commit: 0907ce8c93600e23a96350b1a620584eb702592e
Parents: ef3da82
Author: Junping Du 
Authored: Tue Apr 5 09:01:08 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 09:02:50 2016 -0700

--
 .../yarn/server/nodemanager/containermanager/TestNMProxy.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0907ce8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 7ce15c5..46b32de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -188,8 +188,7 @@ public class TestNMProxy extends BaseContainerManagerTest {
   Assert.fail("should get socket exception");
 } catch (IOException e) {
   // socket exception should be thrown immediately, without RPC retries.
-  Assert.assertTrue(e.toString().
-  contains("Failed on local exception: java.net.SocketException"));
+  Assert.assertTrue(e instanceof java.net.SocketException);
 }
   }
 



hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk 000581674 -> 85ec5573e


HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85ec5573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85ec5573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85ec5573

Branch: refs/heads/trunk
Commit: 85ec5573eb9fd746a9295ecc6fe1ae683073aaf5
Parents: 0005816
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:22:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:22:48 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 57 +
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 67 ++--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  2 +-
 4 files changed, 108 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85ec5573/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 7e6c7e3..fb11cb7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -239,14 +239,33 @@ public class Client {
* 
* @param conf Configuration
* @return the timeout period in milliseconds. -1 if no timeout value is set
+   * @deprecated use {@link #getRpcTimeout(Configuration)} instead
*/
+  @Deprecated
   final public static int getTimeout(Configuration conf) {
+int timeout = getRpcTimeout(conf);
+if (timeout > 0)  {
+  return timeout;
+}
 if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
 CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) {
   return getPingInterval(conf);
 }
 return -1;
   }
+
+  /**
+   * The time after which a RPC will timeout.
+   *
+   * @param conf Configuration
+   * @return the timeout period in milliseconds.
+   */
+  public static final int getRpcTimeout(Configuration conf) {
+int timeout =
+conf.getInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY,
+CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_DEFAULT);
+return (timeout < 0) ? 0 : timeout;
+  }
   /**
* set the connection timeout value in configuration
* 
@@ -386,7 +405,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +413,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +454,14 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  if (rpcTimeout > 0) {
+// effective rpc timeout is rounded up to multiple of pingInterval
+// if pingInterval < rpcTimeout.
+this.soTimeout = (doPing && pingInterval < rpcTimeout) ?
+pingInterval : rpcTimeout;
+  } else {
+this.soTimeout = pingInterval;
+  }
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +512,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
private void handleTimeout(SocketTimeoutException e) throws IOException {
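
The core of the change is the soTimeout selection above: when both pinging and an RPC timeout are configured, the socket timeout is set to the smaller ping interval so the client still wakes up to send pings, and the RPC deadline is enforced separately by elapsed-time accounting in handleTimeout (in the committed patch, the call fails once the accumulated wait reaches rpcTimeout). A standalone restatement of the selection logic, with names mirroring the diff (a sketch, not the committed Connection class):

public class SoTimeoutSketch {
  static int effectiveSoTimeout(boolean doPing, int pingInterval, int rpcTimeout) {
    if (rpcTimeout > 0) {
      // With pinging enabled and pingInterval < rpcTimeout, wake at every
      // ping interval; the effective RPC timeout is then rounded up to a
      // multiple of pingInterval by the caller's elapsed-time accounting.
      return (doPing && pingInterval < rpcTimeout) ? pingInterval : rpcTimeout;
    }
    return pingInterval; // no RPC timeout configured: ping cadence drives the socket
  }

  public static void main(String[] args) {
    // e.g. ping every 60s with a 120s RPC deadline: the socket wakes at 60s,
    // a ping is sent, and the call can still time out at 120s.
    System.out.println(effectiveSoTimeout(true, 60_000, 120_000));  // 60000
    System.out.println(effectiveSoTimeout(false, 60_000, 120_000)); // 120000
    System.out.println(effectiveSoTimeout(true, 60_000, 0));        // 60000
  }
}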

hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0907ce8c9 -> 2542e9bcc


HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

(cherry picked from commit 85ec5573eb9fd746a9295ecc6fe1ae683073aaf5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2542e9bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2542e9bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2542e9bc

Branch: refs/heads/branch-2
Commit: 2542e9bccfc3b9a58fca16224e3a41a7e2ddbd62
Parents: 0907ce8
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:22:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:26:33 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 57 +
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 67 ++--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  2 +-
 4 files changed, 108 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2542e9bc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 093fe1e..efdb3f5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -239,14 +239,33 @@ public class Client {
* 
* @param conf Configuration
* @return the timeout period in milliseconds. -1 if no timeout value is set
+   * @deprecated use {@link #getRpcTimeout(Configuration)} instead
*/
+  @Deprecated
   final public static int getTimeout(Configuration conf) {
+int timeout = getRpcTimeout(conf);
+if (timeout > 0)  {
+  return timeout;
+}
 if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
 CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) {
   return getPingInterval(conf);
 }
 return -1;
   }
+
+  /**
+   * The time after which a RPC will timeout.
+   *
+   * @param conf Configuration
+   * @return the timeout period in milliseconds.
+   */
+  public static final int getRpcTimeout(Configuration conf) {
+int timeout =
+conf.getInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY,
+CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_DEFAULT);
+return (timeout < 0) ? 0 : timeout;
+  }
   /**
* set the connection timeout value in configuration
* 
@@ -386,7 +405,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +413,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +454,14 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  if (rpcTimeout > 0) {
+// effective rpc timeout is rounded up to multiple of pingInterval
+// if pingInterval < rpcTimeout.
+this.soTimeout = (doPing && pingInterval < rpcTimeout) ?
+pingInterval : rpcTimeout;
+  } else {
+this.soTimeout = pingInterval;
+  }
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +512,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
   */

hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 3df56a97d -> 886273e12


HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

(cherry picked from commit 85ec5573eb9fd746a9295ecc6fe1ae683073aaf5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/886273e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/886273e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/886273e1

Branch: refs/heads/branch-2.8
Commit: 886273e123c53addefbc6021bffcefa9c5b67a83
Parents: 3df56a9
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:22:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:29:49 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 57 +
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 67 ++--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  2 +-
 4 files changed, 108 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/886273e1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index af6726e..4127a4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -239,14 +239,33 @@ public class Client {
* 
* @param conf Configuration
* @return the timeout period in milliseconds. -1 if no timeout value is set
+   * @deprecated use {@link #getRpcTimeout(Configuration)} instead
*/
+  @Deprecated
   final public static int getTimeout(Configuration conf) {
+int timeout = getRpcTimeout(conf);
+if (timeout > 0)  {
+  return timeout;
+}
 if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
 CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) {
   return getPingInterval(conf);
 }
 return -1;
   }
+
+  /**
+   * The time after which a RPC will timeout.
+   *
+   * @param conf Configuration
+   * @return the timeout period in milliseconds.
+   */
+  public static final int getRpcTimeout(Configuration conf) {
+int timeout =
+conf.getInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY,
+CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_DEFAULT);
+return (timeout < 0) ? 0 : timeout;
+  }
   /**
* set the connection timeout value in configuration
* 
@@ -386,7 +405,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +413,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +454,14 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  if (rpcTimeout > 0) {
+// effective rpc timeout is rounded up to multiple of pingInterval
+// if pingInterval < rpcTimeout.
+this.soTimeout = (doPing && pingInterval < rpcTimeout) ?
+pingInterval : rpcTimeout;
+  } else {
+this.soTimeout = pingInterval;
+  }
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +512,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
   */

hadoop git commit: YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi via iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk 85ec5573e -> 30206346c


YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30206346
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30206346
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30206346

Branch: refs/heads/trunk
Commit: 30206346cf13fe1b7267f86e7c210b77c86b88c9
Parents: 85ec557
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:47:22 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:47:22 2016 +0900

--
 .../hadoop-yarn-site/src/site/markdown/SecureContainer.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30206346/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
index cd4f913..f7706c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
@@ -114,7 +114,7 @@ min.user.id=1000#Prevent other super-users
 
   `yarn.nodemanager.windows-secure-container-executor.impersonate.allowed` 
should contain the users that are allowed to create containers in the cluster. 
These users will be allowed to be impersonated by hadoopwinutilsvc.
 
-  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explictly forbiden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
+  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explicitly forbidden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
 
   `yarn.nodemanager.windows-secure-container-executor.local-dirs` should 
contain the nodemanager local dirs. hadoopwinutilsvc will allow only file 
operations under these directories. This should contain the same values as 
`$yarn.nodemanager.local-dirs, $yarn.nodemanager.log-dirs` but note that 
hadoopwinutilsvc XML configuration processing does not do substitutions so the 
value must be the final value. All paths must be absolute and no environment 
variable substitution will be performed. The paths are compared 
LOCAL\_INVARIANT case insensitive string comparison, the file path validated 
must start with one of the paths listed in local-dirs configuration. Use comma 
as path separator:`,`
 



hadoop git commit: YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi via iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2542e9bcc -> 36a3fe033


YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi 
via iwasakims)

(cherry picked from commit 30206346cf13fe1b7267f86e7c210b77c86b88c9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36a3fe03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36a3fe03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36a3fe03

Branch: refs/heads/branch-2
Commit: 36a3fe0334181074d159ff42b4430dd44738d06a
Parents: 2542e9b
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:47:22 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:47:42 2016 +0900

--
 .../hadoop-yarn-site/src/site/markdown/SecureContainer.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36a3fe03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
index cd4f913..f7706c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
@@ -114,7 +114,7 @@ min.user.id=1000#Prevent other super-users
 
   `yarn.nodemanager.windows-secure-container-executor.impersonate.allowed` 
should contain the users that are allowed to create containers in the cluster. 
These users will be allowed to be impersonated by hadoopwinutilsvc.
 
-  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explictly forbiden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
+  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explicitly forbidden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
 
   `yarn.nodemanager.windows-secure-container-executor.local-dirs` should 
contain the nodemanager local dirs. hadoopwinutilsvc will allow only file 
operations under these directories. This should contain the same values as 
`$yarn.nodemanager.local-dirs, $yarn.nodemanager.log-dirs` but note that 
hadoopwinutilsvc XML configuration processing does not do substitutions so the 
value must be the final value. All paths must be absolute and no environment 
variable substitution will be performed. The paths are compared 
LOCAL\_INVARIANT case insensitive string comparison, the file path validated 
must start with one of the paths listed in local-dirs configuration. Use comma 
as path separator:`,`
 



hadoop git commit: YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi via iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 886273e12 -> 90adda5da


YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi 
via iwasakims)

(cherry picked from commit 30206346cf13fe1b7267f86e7c210b77c86b88c9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90adda5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90adda5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90adda5d

Branch: refs/heads/branch-2.8
Commit: 90adda5da5095d2f7224192d7b24aa56db056129
Parents: 886273e
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:47:22 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:47:53 2016 +0900

--
 .../hadoop-yarn-site/src/site/markdown/SecureContainer.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90adda5d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
index cd4f913..f7706c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
@@ -114,7 +114,7 @@ min.user.id=1000#Prevent other super-users
 
   `yarn.nodemanager.windows-secure-container-executor.impersonate.allowed` 
should contain the users that are allowed to create containers in the cluster. 
These users will be allowed to be impersonated by hadoopwinutilsvc.
 
-  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explictly forbiden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
+  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explicitly forbidden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
 
   `yarn.nodemanager.windows-secure-container-executor.local-dirs` should 
contain the nodemanager local dirs. hadoopwinutilsvc will allow only file 
operations under these directories. This should contain the same values as 
`$yarn.nodemanager.local-dirs, $yarn.nodemanager.log-dirs` but note that 
hadoopwinutilsvc XML configuration processing does not do substitutions so the 
value must be the final value. All paths must be absolute and no environment 
variable substitution will be performed. The paths are compared 
LOCAL\_INVARIANT case insensitive string comparison, the file path validated 
must start with one of the paths listed in local-dirs configuration. Use comma 
as path separator:`,`
 



hadoop git commit: YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi via iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 960860133 -> 6576fc6cc


YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6576fc6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6576fc6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6576fc6c

Branch: refs/heads/branch-2.7
Commit: 6576fc6f736bc607e87e8c768fe8c804661f
Parents: 9608601
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:50:01 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:50:01 2016 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop-yarn-site/src/site/markdown/SecureContainer.md | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6576fc6c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 28677af..3f0bcaf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -118,6 +118,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4773. Log aggregation performs extraneous filesystem operations when
 rolling log aggregation is disabled (Jun Gong via jlowe)
 
+YARN-4915. Fix typo in YARN Secure Containers documentation
+(Takashi Ohnishi via iwasakims)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6576fc6c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
index cd4f913..f7706c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
@@ -114,7 +114,7 @@ min.user.id=1000#Prevent other super-users
 
   `yarn.nodemanager.windows-secure-container-executor.impersonate.allowed` 
should contain the users that are allowed to create containers in the cluster. 
These users will be allowed to be impersonated by hadoopwinutilsvc.
 
-  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explictly forbiden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
+  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explicitly forbidden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
 
   `yarn.nodemanager.windows-secure-container-executor.local-dirs` should 
contain the nodemanager local dirs. hadoopwinutilsvc will allow only file 
operations under these directories. This should contain the same values as 
`$yarn.nodemanager.local-dirs, $yarn.nodemanager.log-dirs` but note that 
hadoopwinutilsvc XML configuration processing does not do substitutions so the 
value must be the final value. All paths must be absolute and no environment 
variable substitution will be performed. The paths are compared 
LOCAL\_INVARIANT case insensitive string comparison, the file path validated 
must start with one of the paths listed in local-dirs configuration. Use comma 
as path separator:`,`
 



hadoop git commit: YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi via iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/trunk 30206346c -> 500e5a595


YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/500e5a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/500e5a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/500e5a59

Branch: refs/heads/trunk
Commit: 500e5a5952f8f34bf0e1e2653fa01b357d68cc8f
Parents: 3020634
Author: Masatake Iwasaki 
Authored: Wed Apr 6 04:00:31 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 04:00:31 2016 +0900

--
 .../src/site/markdown/CapacityScheduler.md| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/500e5a59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index e86c4f9..8c0b8c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -55,11 +55,11 @@ The `CapacityScheduler` supports the following features:
 
 * **Hierarchical Queues** - Hierarchy of queues is supported to ensure 
resources are shared among the sub-queues of an organization before other 
queues are allowed to use free resources, there-by providing more control and 
predictability.
 
-* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Adminstrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
+* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Administrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
 
 * **Security** - Each queue has strict ACLs which controls which users can 
submit applications to individual queues. Also, there are safe-guards to ensure 
that users cannot view and/or modify applications from other users. Also, 
per-queue and system administrator roles are supported.
 
-* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artifical silos of resources in the cluster which helps utilization.
+* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artificial silos of resources in the cluster which helps utilization.
 
 * **Multi-tenancy** - Comprehensive set of limits are provided to prevent a 
single application, user and queue from monopolizing resources of the queue or 
the cluster as a whole to ensure that the cluster isn't overwhelmed.
 
@@ -67,9 +67,9 @@ The `CapacityScheduler` supports the following features:
 
 * Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
 
-* Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new applications cannot be 
submitted to *itself* or *any of its child queueus*

hadoop git commit: YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi via iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 36a3fe033 -> e98bb0279


YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi 
via iwasakims)

(cherry picked from commit 500e5a5952f8f34bf0e1e2653fa01b357d68cc8f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e98bb027
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e98bb027
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e98bb027

Branch: refs/heads/branch-2
Commit: e98bb0279bca224903db09c8145148e1f12700f5
Parents: 36a3fe0
Author: Masatake Iwasaki 
Authored: Wed Apr 6 04:00:31 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 04:01:34 2016 +0900

--
 .../src/site/markdown/CapacityScheduler.md| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e98bb027/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index b43a032..8b845c2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -55,11 +55,11 @@ The `CapacityScheduler` supports the following features:
 
 * **Hierarchical Queues** - Hierarchy of queues is supported to ensure 
resources are shared among the sub-queues of an organization before other 
queues are allowed to use free resources, there-by providing more control and 
predictability.
 
-* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Adminstrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
+* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Administrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
 
 * **Security** - Each queue has strict ACLs which controls which users can 
submit applications to individual queues. Also, there are safe-guards to ensure 
that users cannot view and/or modify applications from other users. Also, 
per-queue and system administrator roles are supported.
 
-* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artifical silos of resources in the cluster which helps utilization.
+* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artificial silos of resources in the cluster which helps utilization.
 
 * **Multi-tenancy** - Comprehensive set of limits are provided to prevent a 
single application, user and queue from monopolizing resources of the queue or 
the cluster as a whole to ensure that the cluster isn't overwhelmed.
 
@@ -67,9 +67,9 @@ The `CapacityScheduler` supports the following features:
 
 * Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
 
-* Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new

hadoop git commit: YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi via iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 90adda5da -> deef54b26


YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi 
via iwasakims)

(cherry picked from commit 500e5a5952f8f34bf0e1e2653fa01b357d68cc8f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/deef54b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/deef54b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/deef54b2

Branch: refs/heads/branch-2.8
Commit: deef54b2619e28e7e2199e1f4ebda269a30cba7d
Parents: 90adda5
Author: Masatake Iwasaki 
Authored: Wed Apr 6 04:00:31 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 04:01:48 2016 +0900

--
 .../src/site/markdown/CapacityScheduler.md| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/deef54b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index e86c4f9..8c0b8c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -55,11 +55,11 @@ The `CapacityScheduler` supports the following features:
 
 * **Hierarchical Queues** - Hierarchy of queues is supported to ensure 
resources are shared among the sub-queues of an organization before other 
queues are allowed to use free resources, there-by providing more control and 
predictability.
 
-* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Adminstrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
+* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Administrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
 
 * **Security** - Each queue has strict ACLs which controls which users can 
submit applications to individual queues. Also, there are safe-guards to ensure 
that users cannot view and/or modify applications from other users. Also, 
per-queue and system administrator roles are supported.
 
-* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artifical silos of resources in the cluster which helps utilization.
+* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artificial silos of resources in the cluster which helps utilization.
 
 * **Multi-tenancy** - Comprehensive set of limits are provided to prevent a 
single application, user and queue from monopolizing resources of the queue or 
the cluster as a whole to ensure that the cluster isn't overwhelmed.
 
@@ -67,9 +67,9 @@ The `CapacityScheduler` supports the following features:
 
 * Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
 
-* Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state,

hadoop git commit: YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi via iwasakims)

2016-04-05 Thread iwasakims
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 6576fc6cc -> 76e0bb7a1


YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76e0bb7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76e0bb7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76e0bb7a

Branch: refs/heads/branch-2.7
Commit: 76e0bb7a19fbd2c4638a75fc44b4e731e7d536eb
Parents: 6576fc6
Author: Masatake Iwasaki 
Authored: Wed Apr 6 04:03:01 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 04:03:01 2016 +0900

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../src/site/markdown/CapacityScheduler.md| 10 +-
 2 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76e0bb7a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3f0bcaf..03621ac 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -121,6 +121,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4915. Fix typo in YARN Secure Containers documentation
 (Takashi Ohnishi via iwasakims)
 
+YARN-4917. Fix typos in documentation of Capacity Scheduler.
+(Takashi Ohnishi via iwasakims)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76e0bb7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 7b19acd..9d170bb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -54,11 +54,11 @@ The `CapacityScheduler` supports the following features:
 
 * **Hierarchical Queues** - Hierarchy of queues is supported to ensure 
resources are shared among the sub-queues of an organization before other 
queues are allowed to use free resources, there-by providing more control and 
predictability.
 
-* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Adminstrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
+* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Administrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
 
 * **Security** - Each queue has strict ACLs which controls which users can 
submit applications to individual queues. Also, there are safe-guards to ensure 
that users cannot view and/or modify applications from other users. Also, 
per-queue and system administrator roles are supported.
 
-* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artifical silos of resources in the cluster which helps utilization.
+* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artificial silos of resources in the cluster which helps utilization.
 
 * **Multi-tenancy** - Comprehensive set of limits are provided to prevent a 
single application, user and queue from monopolizing resources of the queue or 
the cluster as a whole to ensure that the cluster isn't overwhelmed.
 
@@ -66,9 +66,9 @@ The `CapacityScheduler` supports
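For reference, a short sketch of how the capacity guarantees and elasticity limits described in this excerpt are expressed in configuration; the queue names and percentages are illustrative, not part of this commit.

import org.apache.hadoop.conf.Configuration;

public class QueueCapacityExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Two sub-queues under root; their capacities must add up to 100.
    conf.set("yarn.scheduler.capacity.root.queues", "prod,dev");
    conf.set("yarn.scheduler.capacity.root.prod.capacity", "70");
    conf.set("yarn.scheduler.capacity.root.dev.capacity", "30");
    // Optional hard limit: elasticity lets dev grow into idle capacity,
    // but never past 50% of the cluster.
    conf.set("yarn.scheduler.capacity.root.dev.maximum-capacity", "50");
    System.out.println(conf.get("yarn.scheduler.capacity.root.dev.capacity"));
  }
}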

hadoop git commit: HDFS-9720. DiskBalancer : Add configuration parameters. Contributed by Anu Engineer.

2016-04-05 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 3258c8b03 -> 12b4cf787


HDFS-9720. DiskBalancer : Add configuration parameters. Contributed by Anu 
Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12b4cf78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12b4cf78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12b4cf78

Branch: refs/heads/HDFS-1312
Commit: 12b4cf787e4b275bdea06c85fc5d155bae1fecee
Parents: 3258c8b
Author: Anu Engineer 
Authored: Tue Apr 5 12:23:35 2016 -0700
Committer: Anu Engineer 
Committed: Tue Apr 5 12:23:35 2016 -0700

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |  4 +-
 .../ClientDatanodeProtocolTranslatorPB.java |  8 +-
 .../server/datanode/DiskBalancerWorkItem.java   | 77 +++
 .../src/main/proto/ClientDatanodeProtocol.proto |  2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 15 
 ...tDatanodeProtocolServerSideTranslatorPB.java |  6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  4 +-
 .../hdfs/server/datanode/DiskBalancer.java  | 81 +++-
 .../server/diskbalancer/planner/MoveStep.java   | 75 ++
 .../hdfs/server/diskbalancer/planner/Step.java  | 23 ++
 .../diskbalancer/TestDiskBalancerRPC.java   | 31 
 .../TestDiskBalancerWithMockMover.java  | 37 -
 .../hdfs/server/diskbalancer/TestPlanner.java   | 29 ---
 13 files changed, 328 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b4cf78/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index cf29bc5..133355c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -161,8 +161,8 @@ public interface ClientDatanodeProtocol {
   /**
* Submit a disk balancer plan for execution.
*/
-  void submitDiskBalancerPlan(String planID, long planVersion, long bandwidth,
-  String plan) throws IOException;
+  void submitDiskBalancerPlan(String planID, long planVersion, String plan,
+  boolean skipDateCheck) throws IOException;
 
   /**
* Cancel an executing plan.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b4cf78/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 359d490..d5e9920 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -330,22 +330,20 @@ public class ClientDatanodeProtocolTranslatorPB implements
*   local copies of these plans.
* @param planVersion - The data format of the plans - for future , not
*used now.
-   * @param bandwidth - Maximum disk bandwidth to consume, setting this value
-   *  to zero allows datanode to use the value defined in
-   *  configration.
* @param plan - Actual plan.
+   * @param skipDateCheck - Skips the date check.
* @throws IOException
*/
   @Override
   public void submitDiskBalancerPlan(String planID, long planVersion,
-  long bandwidth, String plan) throws IOException {
+  String plan, boolean skipDateCheck) throws IOException {
 try {
   SubmitDiskBalancerPlanRequestProto request =
   SubmitDiskBalancerPlanRequestProto.newBuilder()
   .setPlanID(planID)
   .setPlanVersion(planVersion)
-  .setMaxDiskBandwidth(bandwidth)
   .setPlan(plan)
+  .setIgnoreDateCheck(skipDateCheck)
   .build();
   rpcProxy.submitDiskBalancerPlan(NULL_CONTROLLER, request);
 } catch (ServiceException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b4cf78/hadoop-hd
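For illustration, a hedged sketch of a caller updated to the revised RPC shape; obtaining the datanode proxy and the plan JSON is outside this diff, so the helper method and its inputs are assumptions.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

public class SubmitPlanExample {
  // dnProxy, planID and planJson are assumed to come from the DiskBalancer
  // CLI plumbing; only the new call shape is shown.
  static void submit(ClientDatanodeProtocol dnProxy, String planID,
      String planJson) throws IOException {
    long planVersion = 1L;          // plan data format, reserved for future use
    boolean skipDateCheck = false;  // true would force-run an out-of-date plan
    // Bandwidth is no longer passed per call; it now comes from the new
    // dfs.disk.balancer.* configuration keys added by this change.
    dnProxy.submitDiskBalancerPlan(planID, planVersion, planJson, skipDateCheck);
  }
}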

[02/50] [abbrv] hadoop git commit: HDFS-5177. blocksScheduled count should be decremented for abandoned blocks (Contributed by Vinayakumar B)

2016-04-05 Thread aengineer
HDFS-5177. blocksScheduled count should be decremented for abandoned blocks 
(Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09d63d5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09d63d5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09d63d5a

Branch: refs/heads/HDFS-1312
Commit: 09d63d5a192b5d6b172f94ff6c94da348fd49ea6
Parents: 690d8a3
Author: Vinayakumar B 
Authored: Wed Mar 30 14:22:00 2016 +0800
Committer: Vinayakumar B 
Committed: Wed Mar 30 14:22:00 2016 +0800

--
 .../blockmanagement/DatanodeStorageInfo.java| 10 
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  4 ++
 .../hadoop/hdfs/TestBlocksScheduledCounter.java | 51 
 3 files changed, 65 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09d63d5a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index c4729ea..843a8d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -298,6 +298,16 @@ public class DatanodeStorageInfo {
 }
   }
 
+  /**
+   * Decrement the number of blocks scheduled for each given storage. This will
+   * be called during abandon block or delete of UC block.
+   */
+  public static void decrementBlocksScheduled(DatanodeStorageInfo... storages) {
+for (DatanodeStorageInfo s : storages) {
+  s.getDatanodeDescriptor().decrementBlocksScheduled(s.getStorageType());
+}
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (this == obj) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09d63d5a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 41fd869..ab2f0fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -82,6 +82,10 @@ class FSDirWriteFileOp {
 if (uc == null) {
   return false;
 }
+if (uc.getUnderConstructionFeature() != null) {
+  DatanodeStorageInfo.decrementBlocksScheduled(uc
+  .getUnderConstructionFeature().getExpectedStorageLocations());
+}
 fsd.getBlockManager().removeBlockFromMap(uc);
 
 if(NameNode.stateChangeLog.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09d63d5a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
index b943219..1894278 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
@@ -78,4 +78,55 @@ public class TestBlocksScheduledCounter {
 out.close();   
 assertEquals(0, dn.getBlocksScheduled());
   }
+
+  /**
+   * Abandon block should decrement the scheduledBlocks count for the dataNode.
+   */
+  @Test
+  public void testScheduledBlocksCounterShouldDecrementOnAbandonBlock()
+  throws Exception {
+cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(
+2).build();
+
+cluster.waitActive();
+fs = cluster.getFileSystem();
+
+DatanodeManager datanodeManager = cluster.getNamesystem().getBlockManager()
+.getDatanodeManager();
+ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
+datanodeManager.fetchDatanodes(dnList, dnList, false);
+for (DatanodeDescriptor descriptor : dnList) {
+  assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(),
+  0, descriptor.getBlocksScheduled());
+}
+
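A toy, self-contained model of the bookkeeping this fix completes (not HDFS code): the per-storage "blocks scheduled" counter must be decremented when a block is abandoned, otherwise it leaks and the NameNode keeps overestimating the load on those storages when choosing targets.

import java.util.HashMap;
import java.util.Map;

public class ScheduledCounterModel {
  private final Map<String, Integer> scheduled = new HashMap<>();

  void allocate(String... storageIds) {
    for (String id : storageIds) {
      scheduled.merge(id, 1, Integer::sum);   // block scheduled on storage
    }
  }

  void abandon(String... storageIds) {
    for (String id : storageIds) {
      scheduled.merge(id, -1, Integer::sum);  // the decrement this patch adds
    }
  }

  public static void main(String[] args) {
    ScheduledCounterModel m = new ScheduledCounterModel();
    m.allocate("s1", "s2");
    m.abandon("s1", "s2");
    System.out.println(m.scheduled);          // {s1=0, s2=0}
  }
}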

[01/50] [abbrv] hadoop git commit: MAPREDUCE-6663. [NNBench] Refactor nnbench as a Tool implementation. Contributed by Brahma Reddy Battula.

2016-04-05 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 12b4cf787 -> 48a8c9c3e


MAPREDUCE-6663. [NNBench] Refactor nnbench as a Tool implementation. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/690d8a36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/690d8a36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/690d8a36

Branch: refs/heads/HDFS-1312
Commit: 690d8a368d3e967495eafea27659b6124989c89e
Parents: 8286270
Author: Akira Ajisaka 
Authored: Wed Mar 30 11:42:54 2016 +0900
Committer: Akira Ajisaka 
Committed: Wed Mar 30 11:42:54 2016 +0900

--
 .../java/org/apache/hadoop/hdfs/NNBench.java| 239 ++-
 .../org/apache/hadoop/hdfs/TestNNBench.java |  84 +++
 2 files changed, 211 insertions(+), 112 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/690d8a36/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
index 96c4710..ee3cc00 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
@@ -25,7 +25,6 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.PrintStream;
-import java.net.InetAddress;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.Iterator;
@@ -33,6 +32,7 @@ import java.util.StringTokenizer;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.SequenceFile.Writer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.FileInputFormat;
 import org.apache.hadoop.mapred.FileOutputFormat;
@@ -54,6 +55,8 @@ import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * This program executes a specified operation that applies load to 
@@ -74,49 +77,48 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat;
  *   must be run before running the other operations.
  */
 
-public class NNBench {
+public class NNBench extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(
   "org.apache.hadoop.hdfs.NNBench");
   
-  protected static String CONTROL_DIR_NAME = "control";
-  protected static String OUTPUT_DIR_NAME = "output";
-  protected static String DATA_DIR_NAME = "data";
-  protected static final String DEFAULT_RES_FILE_NAME = "NNBench_results.log";
-  protected static final String NNBENCH_VERSION = "NameNode Benchmark 0.4";
-  
-  public static String operation = "none";
-  public static long numberOfMaps = 1l; // default is 1
-  public static long numberOfReduces = 1l; // default is 1
-  public static long startTime = 
+  private static String CONTROL_DIR_NAME = "control";
+  private static String OUTPUT_DIR_NAME = "output";
+  private static String DATA_DIR_NAME = "data";
+  static final String DEFAULT_RES_FILE_NAME = "NNBench_results.log";
+  private static final String NNBENCH_VERSION = "NameNode Benchmark 0.4";
+
+  private String operation = "none";
+  private long numberOfMaps = 1l; // default is 1
+  private long numberOfReduces = 1l; // default is 1
+  private long startTime =
   System.currentTimeMillis() + (120 * 1000); // default is 'now' + 2min
-  public static long blockSize = 1l; // default is 1
-  public static int bytesToWrite = 0; // default is 0
-  public static long bytesPerChecksum = 1l; // default is 1
-  public static long numberOfFiles = 1l; // default is 1
-  public static short replicationFactorPerFile = 1; // default is 1
-  public static String baseDir = "/benchmarks/NNBench";  // default
-

[06/50] [abbrv] hadoop git commit: HDFS-10223. peerFromSocketAndKey performs SASL exchange before setting connection timeouts (cmccabe)

2016-04-05 Thread aengineer
HDFS-10223. peerFromSocketAndKey performs SASL exchange before setting 
connection timeouts (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37e23ce4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37e23ce4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37e23ce4

Branch: refs/heads/HDFS-1312
Commit: 37e23ce45c592f3c9c48a08a52a5f46787f6c0e9
Parents: 60e4116
Author: Colin Patrick Mccabe 
Authored: Wed Mar 30 13:37:37 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Wed Mar 30 13:37:37 2016 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +-
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  6 ++-
 .../erasurecode/ErasureCodingWorker.java|  3 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |  2 +-
 .../datatransfer/sasl/TestSaslDataTransfer.java | 48 
 5 files changed, 55 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37e23ce4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 88bd219..d4e3187 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2734,9 +2734,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   NetUtils.connect(sock, addr, getRandomLocalInterfaceAddr(),
   socketTimeout);
   peer = DFSUtilClient.peerFromSocketAndKey(saslClient, sock, this,
-  blockToken, datanodeId);
-  peer.setReadTimeout(socketTimeout);
-  peer.setWriteTimeout(socketTimeout);
+  blockToken, datanodeId, socketTimeout);
   success = true;
   return peer;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37e23ce4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 880234e..b9f4dce2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -587,12 +587,14 @@ public class DFSUtilClient {
   public static Peer peerFromSocketAndKey(
 SaslDataTransferClient saslClient, Socket s,
 DataEncryptionKeyFactory keyFactory,
-Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
-throws IOException {
+Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId,
+int socketTimeoutMs) throws IOException {
 Peer peer = null;
 boolean success = false;
 try {
   peer = peerFromSocket(s);
+  peer.setReadTimeout(socketTimeoutMs);
+  peer.setWriteTimeout(socketTimeoutMs);
   peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
   success = true;
   return peer;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37e23ce4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 74fb3e1..4bcb291 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -875,8 +875,7 @@ public final class ErasureCodingWorker {
 NetUtils.connect(sock, addr, socketTimeout);
 peer = DFSUtilClient.peerFromSocketAndKey(datanode.getSaslClient(),
 sock, datanode.getDataEncryptionKeyFactoryForBlock(b),
-blockToken, datanodeId);
-peer.setReadTimeout(socketTimeout);
+blockToken, datanodeId, socketTimeout);
 success = true;
 return peer;
   } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37e23ce4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hd
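The rule the change enforces is plain java.net ordering: set socket timeouts before any blocking handshake I/O, or a stuck peer can hang the SASL exchange indefinitely. A standalone sketch; the host and port are purely illustrative.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class TimeoutBeforeHandshake {
  public static void main(String[] args) throws IOException {
    int timeoutMs = 60_000;
    try (Socket s = new Socket()) {
      s.connect(new InetSocketAddress("dn1.example.com", 9866), timeoutMs);
      s.setSoTimeout(timeoutMs);  // read timeout, set *before* handshaking
      // ... only now run the blocking SASL negotiation over s's streams.
    }
  }
}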

[17/50] [abbrv] hadoop git commit: YARN-4639. Remove dead code in TestDelegationTokenRenewer added in YARN-3055 (templedf via rkanter)

2016-04-05 Thread aengineer
YARN-4639. Remove dead code in TestDelegationTokenRenewer added in YARN-3055 
(templedf via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a021471
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a021471
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a021471

Branch: refs/heads/HDFS-1312
Commit: 7a021471c376ce846090fbd1a315266bada048d4
Parents: 6d67420
Author: Robert Kanter 
Authored: Thu Mar 31 13:09:09 2016 -0700
Committer: Robert Kanter 
Committed: Thu Mar 31 13:09:09 2016 -0700

--
 .../resourcemanager/security/TestDelegationTokenRenewer.java  | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a021471/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index d85e928..1bfac8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -1090,9 +1090,6 @@ public class TestDelegationTokenRenewer {
 new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
 nm1.registerNode();
 
-//MyFS fs = (MyFS)FileSystem.get(conf);
-//MyToken token1 = fs.getDelegationToken("user123");
-
 // create Token1:
 Text userText1 = new Text("user");
 DelegationTokenIdentifier dtId1 =



[24/50] [abbrv] hadoop git commit: HADOOP-11687. Ignore x-* and response headers when copying an Amazon S3 object. Contributed by Aaron Peterson and harsh.

2016-04-05 Thread aengineer
HADOOP-11687. Ignore x-* and response headers when copying an Amazon S3 object. 
Contributed by Aaron Peterson and harsh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/256c82fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/256c82fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/256c82fe

Branch: refs/heads/HDFS-1312
Commit: 256c82fe2981748cd0befc5490d8118d139908f9
Parents: 3488c4f
Author: Harsh J 
Authored: Fri Apr 1 14:18:10 2016 +0530
Committer: Harsh J 
Committed: Fri Apr 1 14:18:10 2016 +0530

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 70 +++-
 .../src/site/markdown/tools/hadoop-aws/index.md |  7 ++
 2 files changed, 76 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/256c82fe/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 7ab6c79..6afb05d 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -26,6 +26,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
@@ -1128,7 +1129,7 @@ public class S3AFileSystem extends FileSystem {
 }
 
 ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
-final ObjectMetadata dstom = srcom.clone();
+ObjectMetadata dstom = cloneObjectMetadata(srcom);
 if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
   dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
 }
@@ -1235,6 +1236,73 @@ public class S3AFileSystem extends FileSystem {
   }
 
   /**
+   * Creates a copy of the passed {@link ObjectMetadata}.
+   * Does so without using the {@link ObjectMetadata#clone()} method,
+   * to avoid copying unnecessary headers.
+   * @param source the {@link ObjectMetadata} to copy
+   * @return a copy of {@link ObjectMetadata} with only relevant attributes
+   */
+  private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) {
+// This approach may be too brittle, especially if
+// in future there are new attributes added to ObjectMetadata
+// that we do not explicitly call to set here
+ObjectMetadata ret = new ObjectMetadata();
+
+// Non null attributes
+ret.setContentLength(source.getContentLength());
+
+// Possibly null attributes
+// Allowing nulls to pass breaks it during later use
+if (source.getCacheControl() != null) {
+  ret.setCacheControl(source.getCacheControl());
+}
+if (source.getContentDisposition() != null) {
+  ret.setContentDisposition(source.getContentDisposition());
+}
+if (source.getContentEncoding() != null) {
+  ret.setContentEncoding(source.getContentEncoding());
+}
+if (source.getContentMD5() != null) {
+  ret.setContentMD5(source.getContentMD5());
+}
+if (source.getContentType() != null) {
+  ret.setContentType(source.getContentType());
+}
+if (source.getExpirationTime() != null) {
+  ret.setExpirationTime(source.getExpirationTime());
+}
+if (source.getExpirationTimeRuleId() != null) {
+  ret.setExpirationTimeRuleId(source.getExpirationTimeRuleId());
+}
+if (source.getHttpExpiresDate() != null) {
+  ret.setHttpExpiresDate(source.getHttpExpiresDate());
+}
+if (source.getLastModified() != null) {
+  ret.setLastModified(source.getLastModified());
+}
+if (source.getOngoingRestore() != null) {
+  ret.setOngoingRestore(source.getOngoingRestore());
+}
+if (source.getRestoreExpirationTime() != null) {
+  ret.setRestoreExpirationTime(source.getRestoreExpirationTime());
+}
+if (source.getSSEAlgorithm() != null) {
+  ret.setSSEAlgorithm(source.getSSEAlgorithm());
+}
+if (source.getSSECustomerAlgorithm() != null) {
+  ret.setSSECustomerAlgorithm(source.getSSECustomerAlgorithm());
+}
+if (source.getSSECustomerKeyMd5() != null) {
+  ret.setSSECustomerKeyMd5(source.getSSECustomerKeyMd5());
+}
+
+for (Map.Entry<String, String> e : source.getUserMetadata().entrySet()) {
+  ret.addUserMetadata(e.getKey(), e.getValue());
+}
+return ret;
+  }
+
+  /**
* Return the number of bytes that large input files should be optimally
* be split into to minimize i/o time.
* @deprecated use {@link #getDefaultBlockSize(Path)} instead

http://git-wip-us.apache.org/repos/as
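A small standalone demo of the whitelist-copy idea, assuming the AWS SDK v1 ObjectMetadata API on the classpath; only two attributes are copied here for brevity, where the patch copies the full known set.

import com.amazonaws.services.s3.model.ObjectMetadata;

public class MetadataCopyDemo {
  public static void main(String[] args) {
    ObjectMetadata source = new ObjectMetadata();
    source.setContentType("text/plain");
    source.addUserMetadata("owner", "alice");

    // Copy only whitelisted attributes; response-scoped headers returned by
    // getObjectMetadata() are never carried into the copy request.
    ObjectMetadata copy = new ObjectMetadata();
    if (source.getContentType() != null) {
      copy.setContentType(source.getContentType());
    }
    source.getUserMetadata().forEach(copy::addUserMetadata); // x-amz-meta-*

    System.out.println(copy.getContentType() + " " + copy.getUserMetadata());
  }
}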

[34/50] [abbrv] hadoop git commit: HADOOP-11212. NetUtils.wrapException to handle SocketException explicitly. (Contributed by Steve Loughran)

2016-04-05 Thread aengineer
HADOOP-11212. NetUtils.wrapException to handle SocketException explicitly. 
(Contributed by Steve Loughran)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7280550a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7280550a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7280550a

Branch: refs/heads/HDFS-1312
Commit: 7280550a8f668df8aa32e4630db4ead49e9b8b6d
Parents: 89c9347
Author: Arpit Agarwal 
Authored: Mon Apr 4 10:50:11 2016 -0700
Committer: Arpit Agarwal 
Committed: Mon Apr 4 10:50:11 2016 -0700

--
 .../java/org/apache/hadoop/net/NetUtils.java| 15 +--
 .../org/apache/hadoop/net/TestNetUtils.java | 47 +++-
 2 files changed, 38 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280550a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index 2c3661a..4050107 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -782,12 +782,21 @@ public class NetUtils {
   + ": " + exception
   + ";"
   + see("EOFException"));
+} else if (exception instanceof SocketException) {
+  // Many of the predecessor exceptions are subclasses of SocketException,
+  // so must be handled before this
+  return wrapWithMessage(exception,
+  "Call From "
+  + localHost + " to " + destHost + ":" + destPort
+  + " failed on socket exception: " + exception
+  + ";"
+  + see("SocketException"));
 }
 else {
   return (IOException) new IOException("Failed on local exception: "
-   + exception
-   + "; Host Details : "
-   + getHostDetailsAsString(destHost, destPort, localHost))
+ + exception
+ + "; Host Details : "
+ + getHostDetailsAsString(destHost, destPort, localHost))
   .initCause(exception);
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280550a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index c93ede8..e59ac77 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -72,7 +72,7 @@ public class TestNetUtils {
* This is a regression test for HADOOP-6722.
*/
   @Test
-  public void testAvoidLoopbackTcpSockets() throws Exception {
+  public void testAvoidLoopbackTcpSockets() throws Throwable {
 Configuration conf = new Configuration();
 
 Socket socket = NetUtils.getDefaultSocketFactory(conf)
@@ -88,11 +88,11 @@ public class TestNetUtils {
   fail("Should not have connected");
 } catch (ConnectException ce) {
   System.err.println("Got exception: " + ce);
-  assertTrue(ce.getMessage().contains("resulted in a loopback"));
+  assertInException(ce, "resulted in a loopback");
 } catch (SocketException se) {
   // Some TCP stacks will actually throw their own Invalid argument exception
   // here. This is also OK.
-  assertTrue(se.getMessage().contains("Invalid argument"));
+  assertInException(se, "Invalid argument");
 }
   }
   
@@ -188,15 +188,11 @@ public class TestNetUtils {
   }  
 
   @Test
-  public void testVerifyHostnamesNoException() {
+  public void testVerifyHostnamesNoException() throws UnknownHostException {
 String[] names = {"valid.host.com", "1.com"};
-try {
-  NetUtils.verifyHostnames(names);
-} catch (UnknownHostException e) {
-  fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
-}
+NetUtils.verifyHostnames(names);
   }
-  
+
   /** 
* Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
*/
@@ -267,7 +263,18 @@ public class TestNetUtils {
 assertRemoteDetailsIncluded(wrapped);
 assertInException(wrapped, "/EOFException");
   }
-  
+
+  @Test
+  public void testWrapSocketException() throws Throwable {
+IOEx
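The ordering comment in the patch is the crux: ConnectException, BindException and NoRouteToHostException all extend SocketException, so the generic branch has to come last or it would shadow the specific ones. A minimal sketch of the pattern; the message texts are illustrative.

import java.io.IOException;
import java.net.ConnectException;
import java.net.SocketException;

public class WrapOrdering {
  static IOException wrap(IOException e, String host) {
    if (e instanceof ConnectException) {
      return new IOException("Connection refused by " + host, e);
    } else if (e instanceof SocketException) {
      // Reached only for SocketExceptions not matched by a subclass above.
      return new IOException("Socket error talking to " + host, e);
    }
    return new IOException("Failed on local exception talking to " + host, e);
  }

  public static void main(String[] args) {
    System.out.println(wrap(new ConnectException("refused"), "nn1").getMessage());
    System.out.println(wrap(new SocketException("reset"), "nn1").getMessage());
  }
}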

[12/50] [abbrv] hadoop git commit: HADOOP-11393. Revert HADOOP_PREFIX, go back to HADOOP_HOME (aw)

2016-04-05 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a74610d/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
index 6470393..a7a001c 100644
--- 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
+++ 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/LoadTypedBytes.java
@@ -89,7 +89,7 @@ public class LoadTypedBytes implements Tool {
   }
 
   private void printUsage() {
-System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar 
hadoop-streaming.jar"
+System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar 
hadoop-streaming.jar"
 + " loadtb ");
 System.out.println("  Reads typed bytes from standard input" +
 " and stores them in a sequence file in");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a74610d/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
index 118e0fb..9f5b293 100644
--- 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
+++ 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java
@@ -502,7 +502,7 @@ public class StreamJob implements Tool {
   }
 
   private void printUsage(boolean detailed) {
-System.out.println("Usage: $HADOOP_PREFIX/bin/hadoop jar 
hadoop-streaming.jar"
+System.out.println("Usage: $HADOOP_HOME/bin/hadoop jar 
hadoop-streaming.jar"
 + " [options]");
 System.out.println("Options:");
 System.out.println("  -input   DFS input file(s) for the Map"
@@ -551,7 +551,7 @@ public class StreamJob implements Tool {
   System.out.println();
   System.out.println("For more details about these options:");
   System.out.println("Use " +
-  "$HADOOP_PREFIX/bin/hadoop jar hadoop-streaming.jar -info");
+  "$HADOOP_HOME/bin/hadoop jar hadoop-streaming.jar -info");
   return;
 }
 System.out.println();
@@ -611,7 +611,7 @@ public class StreamJob implements Tool {
 System.out.println("  -D stream.non.zero.exit.is.failure=false");
 System.out.println("Use a custom hadoop streaming build along with 
standard"
 + " hadoop install:");
-System.out.println("  $HADOOP_PREFIX/bin/hadoop jar " +
+System.out.println("  $HADOOP_HOME/bin/hadoop jar " +
 "/path/my-hadoop-streaming.jar [...]\\");
 System.out.println("[...] -D stream.shipped.hadoopstreaming=" +
 "/path/my-hadoop-streaming.jar");
@@ -625,7 +625,7 @@ public class StreamJob implements Tool {
 System.out.println("   -cmdenv EXAMPLE_DIR=/home/example/dictionaries/");
 System.out.println();
 System.out.println("Shortcut:");
-System.out.println("   setenv HSTREAMING \"$HADOOP_PREFIX/bin/hadoop jar " 
+
+System.out.println("   setenv HSTREAMING \"$HADOOP_HOME/bin/hadoop jar " +
 "hadoop-streaming.jar\"");
 System.out.println();
 System.out.println("Example: $HSTREAMING -mapper " +
@@ -648,9 +648,9 @@ public class StreamJob implements Tool {
   // 
 
   protected String getHadoopClientHome() {
-String h = env_.getProperty("HADOOP_PREFIX"); // standard Hadoop
+String h = env_.getProperty("HADOOP_HOME"); // standard Hadoop
 if (h == null) {
-  //fail("Missing required environment variable: HADOOP_PREFIX");
+  //fail("Missing required environment variable: HADOOP_HOME");
   h = "UNDEF";
 }
 return h;
@@ -674,8 +674,8 @@ public class StreamJob implements Tool {
 // usually found in: build/contrib or build/hadoop-<version>-dev-streaming.jar
 
 // First try an explicit spec: it's too hard to find our own location in this case:
-// $HADOOP_PREFIX/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar
-// where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_PREFIX
+// $HADOOP_HOME/bin/hadoop jar /not/first/on/classpath/custom-hadoop-streaming.jar
+// where findInClasspath() would find the version of hadoop-streaming.jar in $HADOOP_HOME
 String runtimeClasses = config_.get("stream.shipped.hadoopstreaming"); // jar or class dir
 
 if (runtimeClasses == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a74610d/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.sh
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/start-yarn.
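A hypothetical helper illustrating the compatibility concern behind the rename; Hadoop's own handling lives in the shell scripts above, so this Java sketch is purely illustrative and not part of the commit.

public class HadoopHomeResolver {
  static String resolveHome() {
    String home = System.getenv("HADOOP_HOME");
    if (home == null) {
      home = System.getenv("HADOOP_PREFIX"); // legacy spelling
      if (home != null) {
        System.err.println("WARNING: HADOOP_PREFIX is deprecated; use HADOOP_HOME");
      }
    }
    return home;
  }

  public static void main(String[] args) {
    System.out.println("Hadoop home: " + resolveHome());
  }
}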

[09/50] [abbrv] hadoop git commit: HADOOP-12902. JavaDocs for SignerSecretProvider are out-of-date in AuthenticationFilter. Contributed by Gabor Liptak.

2016-04-05 Thread aengineer
HADOOP-12902. JavaDocs for SignerSecretProvider are out-of-date in 
AuthenticationFilter. Contributed by Gabor Liptak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acca149e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acca149e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acca149e

Branch: refs/heads/HDFS-1312
Commit: acca149ec96f2932bebc492452a63a159de9ce47
Parents: 32c0c3e
Author: Akira Ajisaka 
Authored: Thu Mar 31 16:04:47 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Mar 31 16:04:47 2016 +0900

--
 .../server/AuthenticationFilter.java| 33 ++--
 1 file changed, 16 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acca149e/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 4bdc808..5c93fd3 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -61,9 +61,9 @@ import java.util.*;
  * [#PREFIX#.]type: simple|kerberos|#CLASS#, 'simple' is short for the
  * {@link PseudoAuthenticationHandler}, 'kerberos' is short for {@link 
KerberosAuthenticationHandler}, otherwise
  * the full class name of the {@link AuthenticationHandler} must be 
specified.
- * [#PREFIX#.]signature.secret: when signer.secret.provider is set to
- * "string" or not specified, this is the value for the secret used to sign the
- * HTTP cookie.
+ * [#PREFIX#.]signature.secret.file: when signer.secret.provider is set to
+ * "file" or not specified, this is the location of file including the secret
+ *  used to sign the HTTP cookie.
  * [#PREFIX#.]token.validity: time -in seconds- that the generated token is
  * valid before a new authentication is triggered, default value is
  * 3600 seconds. This is also used for the rollover interval for
@@ -79,17 +79,16 @@ import java.util.*;
  * 
  * 
  * Out of the box it provides 3 signer secret provider implementations:
- * "string", "random", and "zookeeper"
+ * "file", "random" and "zookeeper"
  * 
  * Additional signer secret providers are supported via the
  * {@link SignerSecretProvider} class.
  * 
  * For the HTTP cookies mentioned above, the SignerSecretProvider is used to
  * determine the secret to use for signing the cookies. Different
- * implementations can have different behaviors.  The "string" implementation
- * simply uses the string set in the [#PREFIX#.]signature.secret property
- * mentioned above.  The "random" implementation uses a randomly generated
- * secret that rolls over at the interval specified by the
+ * implementations can have different behaviors. The "file" implementation
+ * loads the secret from a specified file. The "random" implementation uses a
+ * randomly generated secret that rolls over at the interval specified by the
  * [#PREFIX#.]token.validity mentioned above.  The "zookeeper" implementation
  * is like the "random" one, except that it synchronizes the random secret
  * and rollovers between multiple servers; it's meant for HA services.
@@ -97,12 +96,12 @@ import java.util.*;
  * The relevant configuration properties are:
  * 
  * signer.secret.provider: indicates the name of the SignerSecretProvider
- * class to use. Possible values are: "string", "random", "zookeeper", or a
- * classname. If not specified, the "string" implementation will be used with
- * [#PREFIX#.]signature.secret; and if that's not specified, the "random"
+ * class to use. Possible values are: "file", "random", "zookeeper", or a
+ * classname. If not specified, the "file" implementation will be used with
+ * [#PREFIX#.]signature.secret.file; and if that's not specified, the "random"
  * implementation will be used.
- * [#PREFIX#.]signature.secret: When the "string" implementation is
- * specified, this value is used as the secret.
+ * [#PREFIX#.]signature.secret.file: When the "file" implementation is
+ * specified, this content of this file is used as the secret.
  * [#PREFIX#.]token.validity: When the "random" or "zookeeper"
  * implementations are specified, this value is used as the rollover
  * interval.
@@ -176,10 +175,10 @@ public class AuthenticationFilter implements Filter {
   /**
* Constant for the configuration property that indicates the name of the
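A sketch of a filter configuration using the property names documented above; the "myauth" prefix and the secret file path are illustrative.

import java.util.Properties;

public class AuthFilterConfigExample {
  public static void main(String[] args) {
    Properties p = new Properties();
    p.setProperty("myauth.type", "simple");
    // "file" secret provider: the cookie-signing secret is read from a file
    // instead of being placed inline in the configuration.
    p.setProperty("myauth.signer.secret.provider", "file");
    p.setProperty("myauth.signature.secret.file", "/etc/security/http-secret");
    // Doubles as the rollover interval for "random" and "zookeeper".
    p.setProperty("myauth.token.validity", "3600");
    p.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}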

[15/50] [abbrv] hadoop git commit: HADOOP-12916. Allow RPC scheduler/callqueue backoff using response times. Contributed by Xiaoyu Yao.

2016-04-05 Thread aengineer
HADOOP-12916. Allow RPC scheduler/callqueue backoff using response times. 
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d95c6eb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d95c6eb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d95c6eb3

Branch: refs/heads/HDFS-1312
Commit: d95c6eb32cec7768ac418fb467b1198ccf3cf0dc
Parents: 0a74610
Author: Xiaoyu Yao 
Authored: Thu Mar 31 08:42:57 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Mar 31 08:42:57 2016 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   |  13 +
 .../hadoop/fs/CommonConfigurationKeys.java  |  14 +-
 .../org/apache/hadoop/ipc/CallQueueManager.java | 124 +-
 .../apache/hadoop/ipc/DecayRpcScheduler.java| 396 +++
 .../hadoop/ipc/DecayRpcSchedulerMXBean.java |   2 +
 .../apache/hadoop/ipc/DefaultRpcScheduler.java  |  45 +++
 .../org/apache/hadoop/ipc/FairCallQueue.java|  45 +--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|   8 +-
 .../org/apache/hadoop/ipc/RpcScheduler.java |   8 +-
 .../java/org/apache/hadoop/ipc/Schedulable.java |   5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |  77 +++-
 .../apache/hadoop/ipc/WritableRpcEngine.java|  45 +--
 .../apache/hadoop/ipc/TestCallQueueManager.java | 147 ++-
 .../hadoop/ipc/TestDecayRpcScheduler.java   |  42 +-
 .../apache/hadoop/ipc/TestFairCallQueue.java|  79 ++--
 .../hadoop/ipc/TestIdentityProviders.java   |  18 +-
 .../java/org/apache/hadoop/ipc/TestRPC.java |  92 -
 17 files changed, 893 insertions(+), 267 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d95c6eb3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 8355d96..4c8f27b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1626,6 +1626,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   return defaultValue;
 }
 vStr = vStr.trim();
+return getTimeDurationHelper(name, vStr, unit);
+  }
+
+  private long getTimeDurationHelper(String name, String vStr, TimeUnit unit) {
 ParsedTimeDuration vUnit = ParsedTimeDuration.unitFor(vStr);
 if (null == vUnit) {
   LOG.warn("No unit for " + name + "(" + vStr + ") assuming " + unit);
@@ -1636,6 +1640,15 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 return unit.convert(Long.parseLong(vStr), vUnit.unit());
   }
 
+  public long[] getTimeDurations(String name, TimeUnit unit) {
+String[] strings = getTrimmedStrings(name);
+long[] durations = new long[strings.length];
+for (int i = 0; i < strings.length; i++) {
+  durations[i] = getTimeDurationHelper(name, strings[i], unit);
+}
+return durations;
+  }
+
   /**
* Get the value of the name property as a Pattern.
* If no such property is specified, or if the specified value is not a valid

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d95c6eb3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 9b4069a..a708900 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -90,14 +90,22 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /**
* CallQueue related settings. These are not used directly, but rather
* combined with a namespace and port. For instance:
-   * IPC_CALLQUEUE_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
+   * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
*/
-  public static final String IPC_CALLQUEUE_NAMESPACE = "ipc";
+  public static final String IPC_NAMESPACE = "ipc";
   public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
-  public static final String IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY = "identity-provider.impl";
+  public static final String IPC_SCHEDULER_IMPL_KEY = "scheduler.impl";
+  public static final String IPC_IDENTITY_PROVIDER_KEY = 
"identity-provider.i

[30/50] [abbrv] hadoop git commit: YARN-4607. Pagination support for AppAttempt page TotalOutstandingResource Requests table. Contributed by Bibin A Chundatt

2016-04-05 Thread aengineer
YARN-4607. Pagination support for AppAttempt page TotalOutstandingResource 
Requests table. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e6f9297
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e6f9297
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e6f9297

Branch: refs/heads/HDFS-1312
Commit: 1e6f92977dc5431b117745feb5a3491e88a559c0
Parents: 0ecdd4c
Author: Rohith Sharma K S 
Authored: Mon Apr 4 08:09:29 2016 +0530
Committer: Rohith Sharma K S 
Committed: Mon Apr 4 08:09:29 2016 +0530

--
 .../hadoop/yarn/server/webapp/WebPageUtils.java |  6 ++
 .../resourcemanager/webapp/AppAttemptPage.java  |  4 +-
 .../webapp/RMAppAttemptBlock.java   | 61 
 3 files changed, 45 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e6f9297/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index a07baa2..3a26ae5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -95,4 +95,10 @@ public class WebPageUtils {
   .append(", 'mRender': parseHadoopID }]").toString();
   }
 
+  public static String resourceRequestsTableInit() {
+return tableInit().append(", 'aaData': resourceRequestsTableData")
+.append(", bDeferRender: true").append(", bProcessing: true}")
+.toString();
+  }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e6f9297/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
index df5fb9e..45f1887 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AppAttemptPage.java
@@ -41,8 +41,10 @@ public class AppAttemptPage extends RmView {
   : join("Application Attempt ",
 $(YarnWebParams.APPLICATION_ATTEMPT_ID)));
 
-set(DATATABLES_ID, "containers");
+set(DATATABLES_ID, "containers resourceRequests");
 set(initID(DATATABLES, "containers"), WebPageUtils.containersTableInit());
+set(initID(DATATABLES, "resourceRequests"),
+WebPageUtils.resourceRequestsTableInit());
 setTableStyles(html, "containers", ".queue {width:6em}", ".ui {width:8em}");
 
 set(YarnWebParams.WEB_UI_TYPE, YarnWebParams.RM_WEB_UI);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e6f9297/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index 668269e..6fef367 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp

[21/50] [abbrv] hadoop git commit: HADOOP-12950. ShutdownHookManager should have a timeout for each of the Registered shutdown hook. Contributed by Xiaoyu Yao.

2016-04-05 Thread aengineer
HADOOP-12950. ShutdownHookManager should have a timeout for each of the 
Registered shutdown hook. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aac4d65b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aac4d65b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aac4d65b

Branch: refs/heads/HDFS-1312
Commit: aac4d65bf9c6d68f53610e5fe9997a391e3fa053
Parents: 1963978
Author: Xiaoyu Yao 
Authored: Thu Mar 31 15:20:09 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Mar 31 15:22:24 2016 -0700

--
 .../apache/hadoop/util/ShutdownHookManager.java | 116 +++
 .../hadoop/util/TestShutdownHookManager.java|  57 -
 2 files changed, 150 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aac4d65b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index 843454b..33f942f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.util;
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -26,6 +28,10 @@ import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
@@ -42,7 +48,12 @@ public class ShutdownHookManager {
   private static final ShutdownHookManager MGR = new ShutdownHookManager();
 
   private static final Log LOG = LogFactory.getLog(ShutdownHookManager.class);
+  private static final long TIMEOUT_DEFAULT = 10;
+  private static final TimeUnit TIME_UNIT_DEFAULT = TimeUnit.SECONDS;
 
+  private static final ExecutorService EXECUTOR =
+  HadoopExecutors.newSingleThreadExecutor(new ThreadFactoryBuilder()
+  .setDaemon(true).build());
   static {
 try {
   Runtime.getRuntime().addShutdownHook(
@@ -50,14 +61,33 @@ public class ShutdownHookManager {
   @Override
   public void run() {
 MGR.shutdownInProgress.set(true);
-for (Runnable hook: MGR.getShutdownHooksInOrder()) {
+for (HookEntry entry: MGR.getShutdownHooksInOrder()) {
+  Future future = EXECUTOR.submit(entry.getHook());
   try {
-hook.run();
+future.get(entry.getTimeout(), entry.getTimeUnit());
+  } catch (TimeoutException ex) {
+future.cancel(true);
+LOG.warn("ShutdownHook '" + entry.getHook().getClass().
+getSimpleName() + "' timeout, " + ex.toString(), ex);
   } catch (Throwable ex) {
-LOG.warn("ShutdownHook '" + hook.getClass().getSimpleName() +
- "' failed, " + ex.toString(), ex);
+LOG.warn("ShutdownHook '" + entry.getHook().getClass().
+getSimpleName() + "' failed, " + ex.toString(), ex);
   }
 }
+try {
+  EXECUTOR.shutdown();
+  if (!EXECUTOR.awaitTermination(TIMEOUT_DEFAULT,
+  TIME_UNIT_DEFAULT)) {
+LOG.error("ShutdownHookManger shutdown forcefully.");
+EXECUTOR.shutdownNow();
+  }
+  LOG.info("ShutdownHookManger complete shutdown.");
+} catch (InterruptedException ex) {
+  LOG.error("ShutdownHookManger interrupted while waiting for " +
+  "termination.", ex);
+  EXECUTOR.shutdownNow();
+  Thread.currentThread().interrupt();
+}
   }
 }
   );
@@ -77,15 +107,24 @@ public class ShutdownHookManager {
   }
 
   /**
-   * Private structure to store ShutdownHook and its priority.
+   * Private structure to store ShutdownHook, its priority and timeout
+   * settings.
*/
-  private static class HookEntry {
-Runnable hook;
-int priority;
+  static class HookEntry {
+private final Runnable hook;
+private final int priority;
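
The pattern the patch applies, submitting each hook to an executor and bounding the wait with Future.get, can be sketched with plain JDK classes. A minimal, runnable sketch; the class name and timeout values below are illustrative, not Hadoop's:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimedHookDemo {
  public static void main(String[] args) {
    // Single worker, mirroring the EXECUTOR field in the patch above.
    ExecutorService executor = Executors.newSingleThreadExecutor();

    Runnable hook = () -> {
      try {
        Thread.sleep(5000); // simulate a shutdown hook that hangs
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // honor cancellation
      }
    };

    Future<?> future = executor.submit(hook);
    try {
      // Bound the hook's runtime, as the patch does with entry.getTimeout().
      future.get(1, TimeUnit.SECONDS);
    } catch (TimeoutException ex) {
      future.cancel(true); // interrupt the runaway hook
      System.err.println("hook timed out: " + ex);
    } catch (Exception ex) {
      System.err.println("hook failed: " + ex);
    }
    executor.shutdownNow();
  }
}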

[13/50] [abbrv] hadoop git commit: HADOOP-11393. Revert HADOOP_PREFIX, go back to HADOOP_HOME (aw)

2016-04-05 Thread aengineer
HADOOP-11393. Revert HADOOP_PREFIX, go back to HADOOP_HOME (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a74610d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a74610d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a74610d

Branch: refs/heads/HDFS-1312
Commit: 0a74610d1c7c7f183d2b2d0b7a775add53cf6c94
Parents: 0064cba
Author: Allen Wittenauer 
Authored: Thu Mar 24 08:47:00 2016 -0700
Committer: Allen Wittenauer 
Committed: Thu Mar 31 07:51:05 2016 -0700

--
 .../hadoop-common/src/main/bin/hadoop   | 12 +++---
 .../hadoop-common/src/main/bin/hadoop-config.sh |  6 ++-
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |  6 +--
 .../src/main/bin/hadoop-daemons.sh  |  6 +--
 .../src/main/bin/hadoop-functions.sh| 37 +-
 .../src/main/bin/hadoop-layout.sh.example   | 14 +++
 .../hadoop-common/src/main/bin/slaves.sh|  6 +--
 .../hadoop-common/src/main/bin/start-all.sh |  4 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |  4 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   | 10 ++---
 .../org/apache/hadoop/tracing/TraceUtils.java   |  4 +-
 .../src/site/markdown/ClusterSetup.md   | 40 ++--
 .../src/site/markdown/CommandsManual.md |  2 +-
 .../src/site/markdown/UnixShellGuide.md |  2 +-
 .../scripts/hadoop-functions_test_helper.bash   |  3 +-
 .../src/test/scripts/hadoop_basic_init.bats |  2 +-
 .../src/test/scripts/hadoop_bootstrap.bats  |  4 +-
 .../src/test/scripts/hadoop_confdir.bats| 24 ++--
 .../src/test/scripts/hadoop_finalize.bats   |  2 +-
 .../hadoop-kms/src/main/conf/kms-env.sh |  4 +-
 .../hadoop-kms/src/main/libexec/kms-config.sh   |  8 ++--
 .../hadoop-kms/src/main/sbin/kms.sh |  4 +-
 .../src/main/conf/httpfs-env.sh |  4 +-
 .../src/main/libexec/httpfs-config.sh   |  8 ++--
 .../hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh  |  4 +-
 .../src/main/native/fuse-dfs/doc/README |  6 +--
 .../main/native/fuse-dfs/fuse_dfs_wrapper.sh| 12 +++---
 .../src/main/bin/distribute-exclude.sh  |  4 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |  4 +-
 .../hadoop-hdfs/src/main/bin/hdfs-config.sh |  6 +--
 .../src/main/bin/refresh-namenodes.sh   |  4 +-
 .../hadoop-hdfs/src/main/bin/start-balancer.sh  |  4 +-
 .../hadoop-hdfs/src/main/bin/start-dfs.sh   |  4 +-
 .../src/main/bin/start-secure-dns.sh|  4 +-
 .../hadoop-hdfs/src/main/bin/stop-balancer.sh   |  4 +-
 .../hadoop-hdfs/src/main/bin/stop-dfs.sh|  4 +-
 .../hadoop-hdfs/src/main/bin/stop-secure-dns.sh |  4 +-
 .../hadoop-hdfs/src/site/markdown/Federation.md | 18 -
 .../markdown/HDFSHighAvailabilityWithNFS.md |  4 +-
 .../markdown/HDFSHighAvailabilityWithQJM.md |  4 +-
 .../src/site/markdown/HdfsNfsGateway.md |  8 ++--
 .../apache/hadoop/tracing/TestTraceAdmin.java   |  2 +-
 .../TestTracingShortCircuitLocalRead.java   |  4 +-
 hadoop-mapreduce-project/bin/mapred |  4 +-
 hadoop-mapreduce-project/bin/mapred-config.sh   |  6 +--
 .../bin/mr-jobhistory-daemon.sh |  4 +-
 .../apache/hadoop/mapred/pipes/Submitter.java   |  2 +-
 .../java/org/apache/hadoop/fs/DFSCIOTest.java   |  2 +-
 .../apache/hadoop/mapred/ReliabilityTest.java   |  2 +-
 .../apache/hadoop/tools/HadoopArchiveLogs.java  |  4 +-
 .../hadoop/tools/TestHadoopArchiveLogs.java |  4 +-
 .../apache/hadoop/contrib/utils/join/README.txt |  2 +-
 .../native/pipes/debug/pipes-default-script |  5 ++-
 .../hadoop-sls/src/main/bin/rumen2sls.sh|  4 +-
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh  |  8 ++--
 .../apache/hadoop/streaming/DumpTypedBytes.java |  2 +-
 .../hadoop/streaming/HadoopStreaming.java   |  2 +-
 .../apache/hadoop/streaming/LoadTypedBytes.java |  2 +-
 .../org/apache/hadoop/streaming/StreamJob.java  | 16 
 .../hadoop-yarn/bin/start-yarn.sh   |  4 +-
 .../hadoop-yarn/bin/stop-yarn.sh|  4 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  4 +-
 .../hadoop-yarn/bin/yarn-config.sh  |  6 +--
 .../hadoop-yarn/bin/yarn-daemon.sh  |  4 +-
 .../hadoop-yarn/bin/yarn-daemons.sh |  4 +-
 .../TestDockerContainerExecutorWithMocks.java   |  2 +-
 .../site/markdown/DockerContainerExecutor.md.vm |  2 +-
 67 files changed, 211 insertions(+), 208 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a74610d/hadoop-common-project/hadoop-common/src/main/bin/hadoop
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 46eaf27

[07/50] [abbrv] hadoop git commit: HADOOP-12886. Exclude weak ciphers in SSLFactory through ssl-server.xml. Contributed by Wei-Chiu Chuang.

2016-04-05 Thread aengineer
HADOOP-12886. Exclude weak ciphers in SSLFactory through ssl-server.xml. 
Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4fc609d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4fc609d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4fc609d

Branch: refs/heads/HDFS-1312
Commit: e4fc609d5d3739b7809057954c5233cfd1d1117b
Parents: 37e23ce
Author: Zhe Zhang 
Authored: Wed Mar 30 14:13:11 2016 -0700
Committer: Zhe Zhang 
Committed: Wed Mar 30 14:13:11 2016 -0700

--
 .../apache/hadoop/security/ssl/SSLFactory.java  |  42 +-
 .../hadoop/security/ssl/TestSSLFactory.java | 139 ++-
 2 files changed, 175 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4fc609d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
index ea65848..95cba80 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 import javax.net.ssl.HostnameVerifier;
@@ -34,6 +36,11 @@ import javax.net.ssl.SSLSocketFactory;
 import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.security.GeneralSecurityException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
 
 /**
  * Factory that creates SSLEngine and SSLSocketFactory instances using
@@ -48,6 +55,7 @@ import java.security.GeneralSecurityException;
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class SSLFactory implements ConnectionConfigurator {
+  static final Logger LOG = LoggerFactory.getLogger(SSLFactory.class);
 
   @InterfaceAudience.Private
   public static enum Mode { CLIENT, SERVER }
@@ -60,7 +68,7 @@ public class SSLFactory implements ConnectionConfigurator {
 "hadoop.ssl.client.conf";
   public static final String SSL_SERVER_CONF_KEY =
 "hadoop.ssl.server.conf";
-  public static final String SSLCERTIFICATE = IBM_JAVA?"ibmX509":"SunX509"; 
+  public static final String SSLCERTIFICATE = IBM_JAVA?"ibmX509":"SunX509";
 
   public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT = false;
 
@@ -71,6 +79,8 @@ public class SSLFactory implements ConnectionConfigurator {
   "hadoop.ssl.enabled.protocols";
   public static final String DEFAULT_SSL_ENABLED_PROTOCOLS =
   "TLSv1,SSLv2Hello,TLSv1.1,TLSv1.2";
+  public static final String SSL_SERVER_EXCLUDE_CIPHER_LIST =
+  "ssl.server.exclude.cipher.list";
 
   private Configuration conf;
   private Mode mode;
@@ -80,6 +90,7 @@ public class SSLFactory implements ConnectionConfigurator {
   private KeyStoresFactory keystoresFactory;
 
   private String[] enabledProtocols = null;
+  private List<String> excludeCiphers;
 
   /**
* Creates an SSLFactory.
@@ -105,6 +116,14 @@ public class SSLFactory implements ConnectionConfigurator {
 
 enabledProtocols = conf.getStrings(SSL_ENABLED_PROTOCOLS,
 DEFAULT_SSL_ENABLED_PROTOCOLS);
+String excludeCiphersConf =
+sslConf.get(SSL_SERVER_EXCLUDE_CIPHER_LIST, "");
+if (excludeCiphersConf.isEmpty()) {
+  excludeCiphers = new LinkedList<String>();
+} else {
+  LOG.debug("will exclude cipher suites: {}", excludeCiphersConf);
+  excludeCiphers = Arrays.asList(excludeCiphersConf.split(","));
+}
   }
 
   private Configuration readSSLConfiguration(Mode mode) {
@@ -195,11 +214,32 @@ public class SSLFactory implements ConnectionConfigurator 
{
 } else {
   sslEngine.setUseClientMode(false);
   sslEngine.setNeedClientAuth(requireClientCert);
+  disableExcludedCiphers(sslEngine);
 }
 sslEngine.setEnabledProtocols(enabledProtocols);
 return sslEngine;
   }
 
+  private void disableExcludedCiphers(SSLEngine sslEngine) {
+String[] cipherSuites = sslEngine.getEnabledCipherSuites();
+
+ArrayList<String> defaultEnabledCipherSuites =
+new ArrayList<String>(Arrays.asList(cipherSuites));
+Iterator<String> iterator = excludeCiphers.iterator();
+
+while(iterator.hasNext()) {
+  
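
The hunk truncates inside disableExcludedCiphers(); the filtering step it performs can be sketched on its own with JDK collections. Names here are illustrative, not the SSLFactory internals:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CipherFilterDemo {

  // Drop every excluded suite from the enabled set, which is what the
  // patch does before handing the result back to the SSLEngine.
  static String[] filterCiphers(String[] enabled, List<String> excluded) {
    List<String> kept = new ArrayList<>(Arrays.asList(enabled));
    kept.removeAll(excluded);
    return kept.toArray(new String[0]);
  }

  public static void main(String[] args) {
    String[] enabled = {
        "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
        "SSL_RSA_WITH_RC4_128_MD5" // weak suite we want gone
    };
    List<String> excluded = Arrays.asList("SSL_RSA_WITH_RC4_128_MD5");
    System.out.println(Arrays.toString(filterCiphers(enabled, excluded)));
  }
}

On a real engine the filtered array would then be applied with sslEngine.setEnabledCipherSuites(...), a standard JSSE call.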

[05/50] [abbrv] hadoop git commit: YARN-4822. Refactor existing Preemption Policy of CS for easier adding new approach to select preemption candidates. Contributed by Wangda Tan

2016-04-05 Thread aengineer
YARN-4822. Refactor existing Preemption Policy of CS for easier adding new 
approach to select preemption candidates. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60e4116b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60e4116b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60e4116b

Branch: refs/heads/HDFS-1312
Commit: 60e4116bf1d00afed91010e57357fe54057e4e39
Parents: 09d63d5
Author: Jian He 
Authored: Wed Mar 30 12:43:52 2016 -0700
Committer: Jian He 
Committed: Wed Mar 30 12:43:52 2016 -0700

--
 .../monitor/SchedulingEditPolicy.java   |8 +-
 .../monitor/SchedulingMonitor.java  |4 -
 .../CapacitySchedulerPreemptionContext.java |   52 +
 .../CapacitySchedulerPreemptionUtils.java   |   65 ++
 .../capacity/FifoCandidatesSelector.java|  364 ++
 .../capacity/PreemptableResourceCalculator.java |  370 ++
 .../capacity/PreemptionCandidatesSelector.java  |   52 +
 .../ProportionalCapacityPreemptionPolicy.java   | 1086 --
 .../monitor/capacity/TempQueuePerPartition.java |  159 +++
 .../CapacitySchedulerConfiguration.java |   45 +
 .../capacity/preemption/PreemptableQueue.java   |6 -
 .../capacity/preemption/PreemptionManager.java  |2 +-
 ...estProportionalCapacityPreemptionPolicy.java |  133 +--
 ...pacityPreemptionPolicyForNodePartitions.java |   78 +-
 .../TestCapacitySchedulerPreemption.java|   14 +-
 15 files changed, 1432 insertions(+), 1006 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60e4116b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
index 0d587d8..47458a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
@@ -23,7 +23,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
 
 public interface SchedulingEditPolicy {
 
-  public void init(Configuration config, RMContext context,
+  void init(Configuration config, RMContext context,
   PreemptableResourceScheduler scheduler);
 
   /**
@@ -31,10 +31,10 @@ public interface SchedulingEditPolicy {
* allowed to track containers and affect the scheduler. The "actions"
* performed are passed back through an EventHandler.
*/
-  public void editSchedule();
+  void editSchedule();
 
-  public long getMonitoringInterval();
+  long getMonitoringInterval();
 
-  public String getPolicyName();
+  String getPolicyName();
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60e4116b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
index d4c129b..55ec858 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingMonitor.java
@@ -45,10 +45,6 @@ public class SchedulingMonitor extends AbstractService {
 this.rmContext = rmContext;
   }
 
-  public long getMonitorInterval() {
-return monitorInterval;
-  }
-  
   @VisibleForTesting
   public synchronized SchedulingEditPolicy getSchedulingEditPolicy() {
 return scheduleEditPolicy;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60e4116b/had

[27/50] [abbrv] hadoop git commit: YARN-4657. Javadoc comment is broken for Resources.multiplyByAndAddTo(). (Daniel Templeton via kasha)

2016-04-05 Thread aengineer
YARN-4657. Javadoc comment is broken for Resources.multiplyByAndAddTo(). 
(Daniel Templeton via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81d04cae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81d04cae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81d04cae

Branch: refs/heads/HDFS-1312
Commit: 81d04cae41182808ace5d86cdac7e4d71871eb1e
Parents: 5686caa
Author: Karthik Kambatla 
Authored: Fri Apr 1 16:19:54 2016 -0700
Committer: Karthik Kambatla 
Committed: Fri Apr 1 16:20:00 2016 -0700

--
 .../main/java/org/apache/hadoop/yarn/util/resource/Resources.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81d04cae/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index b05d021..558f96c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -152,7 +152,7 @@ public class Resources {
   }
 
   /**
-   * Multiply @param rhs by @param by, and add the result to @param lhs
+   * Multiply {@code rhs} by {@code by}, and add the result to {@code lhs}
* without creating any new {@link Resource} object
*/
   public static Resource multiplyAndAddTo(
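
The fix matters because @param is a block tag: used inline it breaks javadoc rendering, while {@code} is the inline tag meant for referring to parameters in running text. A small compiling example of the corrected style; the Resource stand-in is simplified, not the YARN class:

/** Minimal stand-in so the javadoc example compiles. */
class Resource {
  long memory;
}

class ResourcesDemo {
  /**
   * Multiply {@code rhs} by {@code by}, and add the result to {@code lhs}
   * without creating any new {@link Resource} object.
   *
   * @param lhs resource updated in place
   * @param rhs resource to scale
   * @param by scaling factor
   */
  static void multiplyAndAddTo(Resource lhs, Resource rhs, double by) {
    lhs.memory += (long) (rhs.memory * by); // in-place, no allocation
  }
}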



[42/50] [abbrv] hadoop git commit: YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin A Chundatt

2016-04-05 Thread aengineer
YARN-4609. RM Nodes list page takes too much time to load. Contributed by Bibin 
A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/776b549e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/776b549e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/776b549e

Branch: refs/heads/HDFS-1312
Commit: 776b549e2ac20a68a5513cbcaac0edc33233dc03
Parents: 552237d
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:47:25 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:47:25 2016 +0530

--
 .../resourcemanager/webapp/NodesPage.java   | 53 +---
 .../resourcemanager/webapp/TestNodesPage.java   | 37 --
 2 files changed, 45 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/776b549e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 9603468..7063421 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
-import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_LABEL;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
@@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -101,6 +100,7 @@ class NodesPage extends RmView {
   LOG.debug("Unexpected state filter for inactive RM node");
 }
   }
+  StringBuilder nodeTableData = new StringBuilder("[\n");
   for (RMNode ni : rmNodes) {
 if (stateFilter != null) {
   NodeState state = ni.getState();
@@ -129,27 +129,40 @@ class NodesPage extends RmView {
 NodeInfo info = new NodeInfo(ni, sched);
 int usedMemory = (int) info.getUsedMemory();
 int availableMemory = (int) info.getAvailableMemory();
-TR<TBODY<TABLE<Hamlet>>> row =
-tbody.tr().td(StringUtils.join(",", info.getNodeLabels()))
-.td(info.getRack()).td(info.getState()).td(info.getNodeId());
+nodeTableData.append("[\"")
+.append(StringUtils.join(",", info.getNodeLabels())).append("\",\"")
+.append(info.getRack()).append("\",\"").append(info.getState())
+.append("\",\"").append(info.getNodeId());
 if (isInactive) {
-  row.td()._("N/A")._();
+  nodeTableData.append("\",\"").append("N/A").append("\",\"");
 } else {
   String httpAddress = info.getNodeHTTPAddress();
-  row.td().a("//" + httpAddress, httpAddress)._();
+  nodeTableData.append("\",\"").append(httpAddress).append("\",").append("\"");
 }
-row.td().br().$title(String.valueOf(info.getLastHealthUpdate()))._()
-._(Times.format(info.getLastHealthUpdate()))._()
-.td(info.getHealthReport())
-.td(String.valueOf(info.getNumContainers())).td().br()
-.$title(String.valueOf(usedMemory))._()
-._(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().td().br()
-.$title(String.valueOf(availableMemory))._()
-._(StringUtils.byteDesc(availableMemory * BYTES_IN_MB))._()
-.td(String.valueOf(info.getUsedVirtualCores()))
-.td(String.valueOf(info.getAvailableVirtualCores()))
-.td(ni.getNodeManagerVersion())._();
+nodeTableData.append("<br title='")
+.append(String.valueOf(info.getLastHealthUpdate())).append("'>")
+.append(Times.format(info.getLastHealthUpdate())).append("\",\"")
+
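
The point of the patch is to stop emitting one server-rendered HTML row per node and instead build a JavaScript data array that DataTables renders client side, which is much cheaper for large clusters. A simplified, runnable sketch of that string-building technique; the names and row layout are illustrative, not the actual NodesPage code:

import java.util.Arrays;
import java.util.List;

public class NodeTableDataDemo {

  // Build a JS array literal for DataTables from pre-escaped cell values.
  static String toTableData(List<String[]> rows) {
    StringBuilder sb = new StringBuilder("[\n");
    for (String[] row : rows) {
      sb.append("[\"");
      for (int i = 0; i < row.length; i++) {
        if (i > 0) {
          sb.append("\",\""); // field separator, as in the patch
        }
        sb.append(row[i]);
      }
      sb.append("\"],\n");
    }
    if (sb.charAt(sb.length() - 2) == ',') {
      sb.deleteCharAt(sb.length() - 2); // drop the trailing comma
    }
    return sb.append("]").toString();
  }

  public static void main(String[] args) {
    System.out.println(toTableData(Arrays.asList(
        new String[] {"gpu", "/rack1", "RUNNING", "host1:8041"},
        new String[] {"", "/rack2", "RUNNING", "host2:8041"})));
  }
}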

[43/50] [abbrv] hadoop git commit: YARN-4311. Removing nodes from include and exclude lists will not remove them from decommissioned nodes list. Contributed by Kuhu Shukla

2016-04-05 Thread aengineer
YARN-4311. Removing nodes from include and exclude lists will not remove them 
from decommissioned nodes list. Contributed by Kuhu Shukla


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1cbcd4a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1cbcd4a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1cbcd4a4

Branch: refs/heads/HDFS-1312
Commit: 1cbcd4a491e6a57d466c2897335614dc6770b475
Parents: 776b549
Author: Jason Lowe 
Authored: Tue Apr 5 13:40:19 2016 +
Committer: Jason Lowe 
Committed: Tue Apr 5 13:40:19 2016 +

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   9 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   9 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +
 .../src/main/resources/yarn-default.xml |  13 ++
 .../resourcemanager/NodesListManager.java   | 104 -
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../resourcemanager/ResourceTrackerService.java |   8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   4 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  22 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   9 +
 .../TestResourceTrackerService.java | 216 +--
 .../webapp/TestRMWebServicesNodes.java  |  12 +-
 12 files changed, 387 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 92d586b..951f5a8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -199,6 +199,15 @@ public class NodeInfo {
 public ResourceUtilization getNodeUtilization() {
   return null;
 }
+
+@Override
+public long getUntrackedTimeStamp() {
+  return 0;
+}
+
+@Override
+public void setUntrackedTimeStamp(long timeStamp) {
+}
   }
 
   public static RMNode newNodeInfo(String rackName, String hostName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 2e9cccb..e5013c4 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -188,4 +188,13 @@ public class RMNodeWrapper implements RMNode {
   public ResourceUtilization getNodeUtilization() {
 return node.getNodeUtilization();
   }
+
+  @Override
+  public long getUntrackedTimeStamp() {
+return 0;
+  }
+
+  @Override
+  public void setUntrackedTimeStamp(long timeStamp) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acee57..66b293f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -648,6 +648,15 @@ public class YarnConfiguration extends Configuration {
   "NONE";
 
   /**
+   * Timeout(msec) for an untracked node to remain in shutdown or 
decommissioned
+   * state.
+   */
+  public static final String RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC =
+  RM_PREFIX + "node-removal-untracked.timeout-ms";
+  public static final int
+  DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC = 60000;
+
+  /**
* RM proxy users' prefix
*/
   public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1cbcd4a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xm
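
A short sketch of reading the new setting, assuming hadoop-yarn-api on the classpath; it mirrors, but is not, the lookup the resource manager performs before dropping an untracked node:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class UntrackedTimeoutDemo {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // The key and default added by this patch: 60 seconds unless overridden
    // via yarn.resourcemanager.node-removal-untracked.timeout-ms.
    int timeoutMs = conf.getInt(
        YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC,
        YarnConfiguration.DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
    System.out.println("untracked node removal timeout: " + timeoutMs + " ms");
  }
}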

[03/50] [abbrv] hadoop git commit: YARN-4822. Refactor existing Preemption Policy of CS for easier adding new approach to select preemption candidates. Contributed by Wangda Tan

2016-04-05 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/60e4116b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
index 21ea495..b25 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyForNodePartitions.java
@@ -18,29 +18,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
-import static 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.MONITORING_INTERVAL;
-import static 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.NATURAL_TERMINATION_FACTOR;
-import static 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.TOTAL_PREEMPTION_PER_ROUND;
-import static 
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.WAIT_TIME_BEFORE_KILL;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Matchers.isA;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeSet;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -81,6 +58,25 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 public class TestProportionalCapacityPreemptionPolicyForNodePartitions {
   private static final Log LOG =
   
LogFactory.getLog(TestProportionalCapacityPreemptionPolicyForNodePartitions.class);
@@ -94,8 +90,7 @@ public class 
TestProportionalCapacityPreemptionPolicyForNodePartitions {
 
   private ResourceCalculator rc = new DefaultResourceCalculator();
   private Clock mClock = null;
-  private Configuration conf = null;
-  private CapacitySchedulerConfiguration csConf = null;
+  private CapacitySchedulerConfiguration conf = null;
   private CapacityScheduler cs = null;
   private EventHandler mDisp = null;
   private ProportionalCapacityPreemptionPolicy policy = null;
@@ -107,24 +102,23 @@ public class 
TestProportionalCapacityPreemptionPolicyForNodePartitions {
 org.apache.log4j.Logger.getRootLogger().setLevel(
 org.apache.log4j.Level.DEBUG);
 
-conf = new Configuration(false);
-conf.setLong(WAIT_TIME_BEFORE_KILL, 10000);
-conf.setLong(MONITORING_INTERVAL, 3000);
+conf = new CapacitySchedulerConfiguration(new Configuration(false));
+conf.setLong(
+CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000);
+conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
+3000);
 // report "ideal" preempt
-conf.setFloat(TOTAL_PREEMPTION_PER_ROUND, (float) 1.0);
-conf.setFloat(NATURAL_TERMINATION_FACTOR, (float) 1.0);
-conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
-

[10/50] [abbrv] hadoop git commit: YARN-4884. Fix missing documentation about rmadmin command regarding node labels. Contributed by Kai Sasaki.

2016-04-05 Thread aengineer
YARN-4884. Fix missing documentation about rmadmin command regarding node 
labels. Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1b8f6b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1b8f6b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1b8f6b2

Branch: refs/heads/HDFS-1312
Commit: f1b8f6b2c16403869f78a54268ae1165982a7050
Parents: acca149
Author: Varun Vasudev 
Authored: Thu Mar 31 14:01:48 2016 +0530
Committer: Varun Vasudev 
Committed: Thu Mar 31 14:01:48 2016 +0530

--
 .../src/site/markdown/YarnCommands.md   | 45 +---
 1 file changed, 30 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1b8f6b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index 5941988..40704f0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -217,31 +217,46 @@ Start the ResourceManager
 Usage:
 
 ```
-  yarn rmadmin [-refreshQueues]
-   [-refreshNodes]
-   [-refreshUserToGroupsMapping]
-   [-refreshSuperUserGroupsConfiguration]
-   [-refreshAdminAcls]
-   [-refreshServiceAcl]
-   [-getGroups [username]]
-   [-transitionToActive [--forceactive] [--forcemanual] <serviceId>]
-   [-transitionToStandby [--forcemanual] <serviceId>]
-   [-failover [--forcefence] [--forceactive] <serviceId> <serviceId>]
-   [-getServiceState <serviceId>]
-   [-checkHealth <serviceId>]
-   [-help [cmd]]
+  Usage: yarn rmadmin
+ -refreshQueues
+ -refreshNodes [-g [timeout in seconds]]
+ -refreshNodesResources
+ -refreshSuperUserGroupsConfiguration
+ -refreshUserToGroupsMappings
+ -refreshAdminAcls
+ -refreshServiceAcl
+ -getGroups [username]
+ -addToClusterNodeLabels <"label1(exclusive=true),label2(exclusive=false),label3">
+ -removeFromClusterNodeLabels <label1,label2,label3> (label split by ",")
+ -replaceLabelsOnNode <"node1[:port]=label1,label2 node2[:port]=label1,label2">
+ -directlyAccessNodeLabelStore
+ -refreshClusterMaxPriority
+ -updateNodeResource [NodeID] [MemSize] [vCores] ([OvercommitTimeout])
+ -transitionToActive [--forceactive] <serviceId>
+ -transitionToStandby <serviceId>
+ -failover [--forcefence] [--forceactive] <serviceId> <serviceId>
+ -getServiceState <serviceId>
+ -checkHealth <serviceId>
+ -help [cmd]
 ```
 
 | COMMAND\_OPTIONS | Description |
 |: |: |
 | -refreshQueues | Reload the queues' acls, states and scheduler specific 
properties. ResourceManager will reload the mapred-queues configuration file. |
 | -refreshNodes | Refresh the hosts information at the ResourceManager. |
-| -refreshUserToGroupsMappings | Refresh user-to-groups mappings. |
+| -refreshNodesResources | Refresh resources of NodeManagers at the 
ResourceManager. |
 | -refreshSuperUserGroupsConfiguration | Refresh superuser proxy groups 
mappings. |
+| -refreshUserToGroupsMappings | Refresh user-to-groups mappings. |
 | -refreshAdminAcls | Refresh acls for administration of ResourceManager |
 | -refreshServiceAcl | Reload the service-level authorization policy file 
ResourceManager will reload the authorization policy file. |
 | -getGroups [username] | Get groups the specified user belongs to. |
-| -transitionToActive [--forceactive] [--forcemanual] \<serviceId\> | 
Transitions the service into Active state. Try to make the target active 
without checking that there is no active node if the --forceactive option is 
used. This command can not be used if automatic failover is enabled. Though you 
can override this by --forcemanual option, you need caution. |
+| -addToClusterNodeLabels 
<"label1(exclusive=true),label2(exclusive=false),label3"> | Add to cluster node 
labels. Default exclusivity is true. |
+| -removeFromClusterNodeLabels \<label1,label2,label3\> (label split by ",") 
| Remove from cluster node labels. |
+| -replaceLabelsOnNode <"node1[:port]=label1,label2 
node2[:port]=label1,label2"> | Replace labels on nodes (please note that we do 
not support specifying multiple labels on a single host for now.) |
+| -directlyAccessNodeLabelStore | This is DEPRECATED, will be removed in 
future releases. Directly access node label store, with this option, all node 
label related operations will not connect RM. Instead, they will access/modify 
stored node labels directly. By default, it is false (access via RM). AND 
PLEASE NOTE: if you configured yarn.node-labels.fs-

[04/50] [abbrv] hadoop git commit: YARN-4822. Refactor existing Preemption Policy of CS for easier adding new approach to select preemption candidates. Contributed by Wangda Tan

2016-04-05 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/60e4116b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 9b499c8..7e668b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -17,26 +17,13 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.PriorityQueue;
-import java.util.Set;
-import java.util.TreeSet;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
@@ -50,7 +37,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueCapacities;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptableQueue;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
 import org.apache.hadoop.yarn.util.Clock;
@@ -58,8 +44,16 @@ import org.apache.hadoop.yarn.util.SystemClock;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableSet;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 /**
  * This class implement a {@link SchedulingEditPolicy} that is designed to be
@@ -80,79 +74,59 @@ import com.google.common.collect.ImmutableSet;
  * this policy will trigger forced termination of containers (again by 
generating
  * {@link ContainerPreemptEvent}).
  */
-public class ProportionalCapacityPreemptionPolicy implements 
SchedulingEditPolicy {
-
+public class ProportionalCapacityPreemptionPolicy
+implements SchedulingEditPolicy, CapacitySchedulerPreemptionContext {
   private static final Log LOG =
 LogFactory.getLog(ProportionalCapacityPreemptionPolicy.class);
 
-  /** If true, run the policy but do not affect the cluster with preemption and
-   * kill events. */
-  public static final String OBSERVE_ONLY =
-  "yarn.resourcemanager.monitor.capacity.preemption.observe_only";
-  /** Time in milliseconds between invocations of this policy */
-  public static final String MONITORING_INTERVAL =
-  "yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval";
-  /** Time in milliseconds between requesting a preemption from an application
-   * and killing the container. */
-  public static final String WAIT_TIME_BEFORE_KILL =
-  "yarn.resourcemanager.monitor.capacity.preemption.max_wait_before_kill";
-  /** Maximum percentage of resources preempted in a single round. By
-   * controlling this value one can throttle the pace at which containers are
-   * reclaimed from the cluster. After com

[08/50] [abbrv] hadoop git commit: HDFS-10221. Add .json to the rat exclusions. Contributed by Ming Ma.

2016-04-05 Thread aengineer
HDFS-10221. Add .json to the rat exclusions. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32c0c3ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32c0c3ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32c0c3ec

Branch: refs/heads/HDFS-1312
Commit: 32c0c3ecdf72e89a63f4aee5e75d1c5a12714b89
Parents: e4fc609
Author: Akira Ajisaka 
Authored: Thu Mar 31 09:04:09 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Mar 31 09:06:14 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32c0c3ec/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 4e1901b5..668bbfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -381,6 +381,7 @@
 <exclude>src/test/all-tests</exclude>
 <exclude>src/test/resources/*.tgz</exclude>
 <exclude>src/test/resources/data*</exclude>
+<exclude>**/*.json</exclude>
 <exclude>src/test/resources/editsStored*</exclude>
 <exclude>src/test/resources/empty-file</exclude>
 <exclude>src/main/webapps/datanode/robots.txt</exclude>



[16/50] [abbrv] hadoop git commit: YARN-4183. Clarify the behavior of timeline service config properties (Naganarasimha G R via sjlee)

2016-04-05 Thread aengineer
YARN-4183. Clarify the behavior of timeline service config properties 
(Naganarasimha G R via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d67420d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d67420d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d67420d

Branch: refs/heads/HDFS-1312
Commit: 6d67420dbc5c6097216fa40fcec8ed626b2bae14
Parents: d95c6eb
Author: Sangjin Lee 
Authored: Thu Mar 31 10:49:03 2016 -0700
Committer: Sangjin Lee 
Committed: Thu Mar 31 10:49:03 2016 -0700

--
 .../src/main/resources/yarn-default.xml| 13 ++---
 .../src/site/markdown/TimelineServer.md|  6 +++---
 2 files changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d67420d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 33cd919..cb3c73a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1874,8 +1874,12 @@
   
 
   
-Indicate to clients whether timeline service is enabled or 
not.
-If enabled, clients will put entities and events to the timeline server.
+
+In the server side it indicates whether timeline service is enabled or not.
+And in the client side, users can enable it to indicate whether client 
wants
+to use timeline service. If it's enabled in the client side along with
+security, then yarn client tries to fetch the delegation tokens for the
+timeline server.
 
 <name>yarn.timeline-service.enabled</name>
 <value>false</value>
@@ -2027,7 +2031,10 @@
   
 
   
-Client policy for whether timeline operations are 
non-fatal
+Client policy for whether timeline operations are non-fatal.
+Should the failure to obtain a delegation token be considered an 
application
+failure (option = false),  or should the client attempt to continue to
+publish information without it (option=true)
 <name>yarn.timeline-service.client.best-effort</name>
 <value>false</value>
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d67420d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 8ef7d9a..9283e58 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -141,7 +141,7 @@ and cluster operators.
 
 | Configuration Property | Description |
 |: |: |
-| `yarn.timeline-service.enabled` | Indicate to clients whether Timeline 
service is enabled or not. If enabled, the `TimelineClient` library used by 
applications will post entities and events to the Timeline server. Defaults to 
`false`. |
+| `yarn.timeline-service.enabled` | In the server side it indicates whether 
timeline service is enabled or not. And in the client side, users can enable it 
to indicate whether client wants to use timeline service. If it's enabled in 
the client side along with security, then yarn client tries to fetch the 
delegation tokens for the timeline server. Defaults to `false`. |
 | `yarn.resourcemanager.system-metrics-publisher.enabled` | The setting that 
controls whether or not YARN system metrics are published on the timeline 
server by RM. Defaults to `false`. |
 | `yarn.timeline-service.generic-application-history.enabled` | Indicate to 
clients whether to query generic application data from timeline history-service 
or not. If not enabled then application data is queried only from Resource 
Manager. Defaults to `false`. |
 
@@ -150,7 +150,7 @@ and cluster operators.
 | Configuration Property | Description |
 |: |: |
 | `yarn.timeline-service.store-class` | Store class name for timeline store. 
Defaults to `org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore`. |
-| `yarn.timeline-service.leveldb-timeline-store.path` | Store file name for 
leveldb timeline store. Defaults to `${hadoop.tmp.dir}/yarn/timelin`e. |
+| `yarn.timeline-service.leveldb-timeline-store.path` | Store file name for 
leveldb timeline store. Defaults to `${hadoop.tmp.dir}/yarn/timeline`. |
 | `yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms` |

[25/50] [abbrv] hadoop git commit: YARN-4895. Add subtractFrom method to ResourceUtilization class. Contributed by Konstantinos Karanasos.

2016-04-05 Thread aengineer
YARN-4895. Add subtractFrom method to ResourceUtilization class. Contributed by 
Konstantinos Karanasos.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82621e38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82621e38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82621e38

Branch: refs/heads/HDFS-1312
Commit: 82621e38a0445832998bc00693279e23a98605c1
Parents: 256c82f
Author: Arun Suresh 
Authored: Fri Apr 1 14:57:06 2016 -0700
Committer: Arun Suresh 
Committed: Fri Apr 1 14:57:06 2016 -0700

--
 .../yarn/api/records/ResourceUtilization.java   | 22 
 1 file changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82621e38/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
index 5f52f85..2ae4872 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
@@ -44,6 +44,14 @@ public abstract class ResourceUtilization implements
 return utilization;
   }
 
+  @Public
+  @Unstable
+  public static ResourceUtilization newInstance(
+  ResourceUtilization resourceUtil) {
+return newInstance(resourceUtil.getPhysicalMemory(),
+resourceUtil.getVirtualMemory(), resourceUtil.getCPU());
+  }
+
   /**
* Get used virtual memory.
*
@@ -147,4 +155,18 @@ public abstract class ResourceUtilization implements
 this.setVirtualMemory(this.getVirtualMemory() + vmem);
 this.setCPU(this.getCPU() + cpu);
   }
+
+  /**
+   * Subtract utilization from the current one.
+   * @param pmem Physical memory to be subtracted.
+   * @param vmem Virtual memory to be subtracted.
+   * @param cpu CPU utilization to be subtracted.
+   */
+  @Public
+  @Unstable
+  public void subtractFrom(int pmem, int vmem, float cpu) {
+this.setPhysicalMemory(this.getPhysicalMemory() - pmem);
+this.setVirtualMemory(this.getVirtualMemory() - vmem);
+this.setCPU(this.getCPU() - cpu);
+  }
 }
\ No newline at end of file
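
A small usage sketch of the new method, assuming hadoop-yarn-api on the classpath; the numbers are illustrative:

import org.apache.hadoop.yarn.api.records.ResourceUtilization;

public class UtilizationDemo {
  public static void main(String[] args) {
    // 2048 MB physical, 3072 MB virtual, 0.5 of the node's CPU in use.
    ResourceUtilization total =
        ResourceUtilization.newInstance(2048, 3072, 0.5f);

    // A container finishing gives back its share; subtractFrom is in place,
    // the mirror image of the existing addTo.
    total.subtractFrom(512, 768, 0.125f);

    System.out.println(total.getPhysicalMemory()); // 1536
    System.out.println(total.getVirtualMemory());  // 2304
    System.out.println(total.getCPU());            // 0.375
  }
}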



[18/50] [abbrv] hadoop git commit: YARN-4811. Generate histograms in ContainerMetrics for actual container resource usage

2016-04-05 Thread aengineer
YARN-4811. Generate histograms in ContainerMetrics for actual container 
resource usage


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0dd9bcab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0dd9bcab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0dd9bcab

Branch: refs/heads/HDFS-1312
Commit: 0dd9bcab97ccdf24a2174636604110b74664cf80
Parents: 7a02147
Author: Jian He 
Authored: Thu Mar 31 14:28:13 2016 -0700
Committer: Jian He 
Committed: Thu Mar 31 14:28:13 2016 -0700

--
 .../hadoop/metrics2/lib/MutableQuantiles.java   |  7 +-
 .../hadoop/metrics2/util/QuantileEstimator.java | 32 +
 .../hadoop/metrics2/util/SampleQuantiles.java   |  2 +-
 .../hadoop-yarn-server-nodemanager/pom.xml  |  5 ++
 .../monitor/ContainerMetrics.java   | 69 
 .../monitor/TestContainerMetrics.java   | 58 +++-
 6 files changed, 170 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd9bcab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
index 2e6053f..a4711db 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableQuantiles.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.util.Quantile;
+import org.apache.hadoop.metrics2.util.QuantileEstimator;
 import org.apache.hadoop.metrics2.util.SampleQuantiles;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -54,7 +55,7 @@ public class MutableQuantiles extends MutableMetric {
   private final MetricsInfo[] quantileInfos;
   private final int interval;
 
-  private SampleQuantiles estimator;
+  private QuantileEstimator estimator;
   private long previousCount = 0;
 
   @VisibleForTesting
@@ -134,6 +135,10 @@ public class MutableQuantiles extends MutableMetric {
 return interval;
   }
 
+  public synchronized void setEstimator(QuantileEstimator quantileEstimator) {
+this.estimator = quantileEstimator;
+  }
+
   /**
* Runnable used to periodically roll over the internal
* {@link SampleQuantiles} every interval.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd9bcab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/QuantileEstimator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/QuantileEstimator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/QuantileEstimator.java
new file mode 100644
index 000..075b879
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/QuantileEstimator.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.util;
+
+import java.util.Map;
+
+public interface QuantileEstimator {
+
+  void insert(long value);
+
+  Map<Quantile, Long> snapshot();
+
+  long getCount();
+
+  void clear();
+}
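
Since insert(), snapshot(), getCount() and clear() are the whole contract, a custom estimator can be handed to MutableQuantiles.setEstimator(). A deliberately naive implementation, for illustration only; it is not the histogram-backed estimator the container-metrics change installs:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.metrics2.util.Quantile;
import org.apache.hadoop.metrics2.util.QuantileEstimator;

public class MaxOnlyEstimator implements QuantileEstimator {
  private long count;
  private long max = Long.MIN_VALUE;

  @Override
  public synchronized void insert(long value) {
    count++;
    max = Math.max(max, value);
  }

  @Override
  public synchronized Map<Quantile, Long> snapshot() {
    // Report the observed max as the 99th percentile; a real estimator
    // tracks each requested quantile within its error bound.
    Map<Quantile, Long> snap = new HashMap<>();
    snap.put(new Quantile(0.99, 0.001), max);
    return snap;
  }

  @Override
  public synchronized long getCount() {
    return count;
  }

  @Override
  public synchronized void clear() {
    count = 0;
    max = Long.MIN_VALUE;
  }
}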

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dd9bcab/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/m

[45/50] [abbrv] hadoop git commit: HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination paths. Contributed by Kuhu Shukla.

2016-04-05 Thread aengineer
HDFS-10239. Fsshell mv fails if port usage doesn't match in src and destination 
paths. Contributed by Kuhu Shukla.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91746450
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91746450
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91746450

Branch: refs/heads/HDFS-1312
Commit: 917464505c0e930ebeb4c775d829e51c56a48686
Parents: 6be28bc
Author: Kihwal Lee 
Authored: Tue Apr 5 09:07:24 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 09:07:24 2016 -0500

--
 .../apache/hadoop/fs/shell/MoveCommands.java|  6 +++-
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 31 
 2 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
index 02a3b25..d359282 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
@@ -100,7 +100,11 @@ class MoveCommands {
 
 @Override
 protected void processPath(PathData src, PathData target) throws 
IOException {
-  if (!src.fs.getUri().equals(target.fs.getUri())) {
+  String srcUri = src.fs.getUri().getScheme() + "://" +
+  src.fs.getUri().getHost();
+  String dstUri = target.fs.getUri().getScheme() + "://" +
+  target.fs.getUri().getHost();
+  if (!srcUri.equals(dstUri)) {
 throw new PathIOException(src.toString(),
 "Does not match target filesystem");
   }
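
The essence of the fix: only scheme and host now decide whether source and target live on the same filesystem, so a port given on one side and defaulted on the other no longer fails the move. A standalone restatement of the comparison (hypothetical helper, not the committed code):

  import java.net.URI;

  final class FsUriCompare {
    // Hypothetical helper mirroring the patched check: two URIs name the
    // same filesystem when scheme and host agree, regardless of port.
    static boolean sameFileSystem(URI src, URI dst) {
      String srcKey = src.getScheme() + "://" + src.getHost();
      String dstKey = dst.getScheme() + "://" + dst.getHost();
      return srcKey.equals(dstKey);
    }

    public static void main(String[] args) {
      // hdfs://localhost:8020/a vs. hdfs://localhost/b: same filesystem now.
      System.out.println(sameFileSystem(
          URI.create("hdfs://localhost:8020/a"),
          URI.create("hdfs://localhost/b"))); // prints true
    }
  }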

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91746450/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 41cd5c0..b75ac11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -559,6 +559,37 @@ public class TestDFSShell {
 }
   }
 
+  @Test
+  public void testMoveWithTargetPortEmpty() throws Exception {
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf)
+  .format(true)
+  .numDataNodes(2)
+  .nameNodePort(8020)
+  .waitSafeMode(true)
+  .build();
+  FileSystem srcFs = cluster.getFileSystem();
+  FsShell shell = new FsShell();
+  shell.setConf(conf);
+  String[] argv = new String[2];
+  argv[0] = "-mkdir";
+  argv[1] = "/testfile";
+  ToolRunner.run(shell, argv);
+  argv = new String[3];
+  argv[0] = "-mv";
+  argv[1] = srcFs.getUri() + "/testfile";
+  argv[2] = "hdfs://localhost/testfile2";
+  int ret = ToolRunner.run(shell, argv);
+  assertEquals("mv should have succeeded", 0, ret);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
   @Test (timeout = 30000)
   public void testURIPaths() throws Exception {
 Configuration srcConf = new HdfsConfiguration();



[48/50] [abbrv] hadoop git commit: YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi via iwasakims)

2016-04-05 Thread aengineer
YARN-4915. Fix typo in YARN Secure Containers documentation (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30206346
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30206346
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30206346

Branch: refs/heads/HDFS-1312
Commit: 30206346cf13fe1b7267f86e7c210b77c86b88c9
Parents: 85ec557
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:47:22 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:47:22 2016 +0900

--
 .../hadoop-yarn-site/src/site/markdown/SecureContainer.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30206346/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
index cd4f913..f7706c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/SecureContainer.md
@@ -114,7 +114,7 @@ min.user.id=1000#Prevent other super-users
 
   `yarn.nodemanager.windows-secure-container-executor.impersonate.allowed` 
should contain the users that are allowed to create containers in the cluster. 
These users will be allowed to be impersonated by hadoopwinutilsvc.
 
-  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explictly forbiden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
+  `yarn.nodemanager.windows-secure-container-executor.impersonate.denied` 
should contain users that are explicitly forbidden from creating containers. 
hadoopwinutilsvc will refuse to impersonate these users.
 
   `yarn.nodemanager.windows-secure-container-executor.local-dirs` should 
contain the nodemanager local dirs. hadoopwinutilsvc will allow only file 
operations under these directories. This should contain the same values as 
`$yarn.nodemanager.local-dirs, $yarn.nodemanager.log-dirs` but note that 
hadoopwinutilsvc XML configuration processing does not do substitutions so the 
value must be the final value. All paths must be absolute and no environment 
variable substitution will be performed. The paths are compared 
LOCAL\_INVARIANT case insensitive string comparison, the file path validated 
must start with one of the paths listed in local-dirs configuration. Use comma 
as path separator:`,`
 



[47/50] [abbrv] hadoop git commit: HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)

2016-04-05 Thread aengineer
HADOOP-12672. RPC timeout should not override IPC ping interval (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85ec5573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85ec5573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85ec5573

Branch: refs/heads/HDFS-1312
Commit: 85ec5573eb9fd746a9295ecc6fe1ae683073aaf5
Parents: 0005816
Author: Masatake Iwasaki 
Authored: Wed Apr 6 03:22:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 03:22:48 2016 +0900

--
 .../main/java/org/apache/hadoop/ipc/Client.java | 57 +
 .../src/main/resources/core-default.xml |  9 ++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 67 ++--
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |  2 +-
 4 files changed, 108 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85ec5573/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 7e6c7e3..fb11cb7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -239,14 +239,33 @@ public class Client {
* 
* @param conf Configuration
* @return the timeout period in milliseconds. -1 if no timeout value is set
+   * @deprecated use {@link #getRpcTimeout(Configuration)} instead
*/
+  @Deprecated
   final public static int getTimeout(Configuration conf) {
+int timeout = getRpcTimeout(conf);
+if (timeout > 0)  {
+  return timeout;
+}
 if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,
 CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) {
   return getPingInterval(conf);
 }
 return -1;
   }
+
+  /**
+   * The time after which a RPC will timeout.
+   *
+   * @param conf Configuration
+   * @return the timeout period in milliseconds.
+   */
+  public static final int getRpcTimeout(Configuration conf) {
+int timeout =
+conf.getInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY,
+CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_DEFAULT);
+return (timeout < 0) ? 0 : timeout;
+  }
   /**
* set the connection timeout value in configuration
* 
@@ -386,7 +405,7 @@ public class Client {
 private Socket socket = null; // connected socket
 private DataInputStream in;
 private DataOutputStream out;
-private int rpcTimeout;
+private final int rpcTimeout;
 private int maxIdleTime; //connections will be culled if it was idle for 
 //maxIdleTime msecs
 private final RetryPolicy connectionRetryPolicy;
@@ -394,8 +413,9 @@ public class Client {
 private int maxRetriesOnSocketTimeouts;
 private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
 private final boolean tcpLowLatency; // if T then use low-delay QoS
-private boolean doPing; //do we need to send ping message
-private int pingInterval; // how often sends ping to the server in msecs
+private final boolean doPing; //do we need to send ping message
+private final int pingInterval; // how often sends ping to the server
+private final int soTimeout; // used by ipc ping and rpc timeout
 private ByteArrayOutputStream pingRequest; // ping message
 
 // currently active calls
@@ -434,6 +454,14 @@ public class Client {
 pingHeader.writeDelimitedTo(pingRequest);
   }
   this.pingInterval = remoteId.getPingInterval();
+  if (rpcTimeout > 0) {
+// effective rpc timeout is rounded up to multiple of pingInterval
+// if pingInterval < rpcTimeout.
+this.soTimeout = (doPing && pingInterval < rpcTimeout) ?
+pingInterval : rpcTimeout;
+  } else {
+this.soTimeout = pingInterval;
+  }
   this.serviceClass = serviceClass;
   if (LOG.isDebugEnabled()) {
 LOG.debug("The ping interval is " + this.pingInterval + " ms.");
@@ -484,12 +512,12 @@ public class Client {
 
   /* Process timeout exception
* if the connection is not going to be closed or 
-   * is not configured to have a RPC timeout, send a ping.
-   * (if rpcTimeout is not set to be 0, then RPC should timeout.
-   * otherwise, throw the timeout exception.
+   * the RPC is not timed out yet, send a ping.
*/
-  private void handleTimeout(SocketTimeoutException e) throws IOException {
-if (shouldCloseConnection.get() || !running.get() || rpc
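
To summarize the constructor change: when pinging is enabled and the ping interval is shorter than the RPC timeout, the socket timeout becomes the ping interval, so the client still wakes up to send pings while the overall RPC deadline is enforced across successive wake-ups. A standalone restatement of that selection (a sketch, not the committed code):

  final class SoTimeoutSketch {
    // Restates the constructor logic above: derive the effective socket
    // timeout from rpcTimeout, doPing and pingInterval.
    static int effectiveSoTimeout(int rpcTimeout, boolean doPing,
        int pingInterval) {
      if (rpcTimeout > 0) {
        // Wake at pingInterval if that comes first, so pings still go out;
        // the RPC deadline is then checked across multiple wake-ups.
        return (doPing && pingInterval < rpcTimeout)
            ? pingInterval : rpcTimeout;
      }
      return pingInterval; // no RPC timeout configured: ping interval governs
    }
  }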

[23/50] [abbrv] hadoop git commit: Revert "YARN-4857. Add missing default configuration regarding preemption of CapacityScheduler. Contributed by Kai Sasaki."

2016-04-05 Thread aengineer
Revert "YARN-4857. Add missing default configuration regarding preemption of 
CapacityScheduler. Contributed by Kai Sasaki."

This reverts commit 0064cba169d1bb761f6e81ee86830be598d7c500.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3488c4f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3488c4f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3488c4f2

Branch: refs/heads/HDFS-1312
Commit: 3488c4f2c9767684eb1007bb00250f474c06d5d8
Parents: a8d8b80
Author: Varun Vasudev 
Authored: Fri Apr 1 12:20:40 2016 +0530
Committer: Varun Vasudev 
Committed: Fri Apr 1 12:20:40 2016 +0530

--
 .../src/main/resources/yarn-default.xml | 58 
 1 file changed, 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3488c4f2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index cb3c73a..506cf3d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -908,64 +908,6 @@
 60
   
 
-  <property>
-    <description>
-    If true, run the policy but do not affect the cluster with preemption and kill events.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.observe_only</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <description>
-    Time in milliseconds between invocations of this ProportionalCapacityPreemptionPolicy
-    policy.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval</name>
-    <value>3000</value>
-  </property>
-
-  <property>
-    <description>
-    Time in milliseconds between requesting a preemption from an application and killing
-    the container.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.max_wait_before_kill</name>
-    <value>15000</value>
-  </property>
-
-  <property>
-    <description>
-    Maximum percentage of resources preempted in a single round. By controlling this value one
-    can throttle the pace at which containers are reclaimed from the cluster. After computing
-    the total desired preemption, the policy scales it back within this limit.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
-    <value>0.1</value>
-  </property>
-
-  <property>
-    <description>
-    Maximum amount of resources above the target capacity ignored for preemption.
-    This defines a deadzone around the target capacity that helps prevent thrashing and
-    oscillations around the computed target balance. High values would slow the time to capacity
-    and (absent natural.completions) it might prevent convergence to guaranteed capacity.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.max_ignored_over_capacity</name>
-    <value>0.1</value>
-  </property>
-
-  <property>
-    <description>
-    Given a computed preemption target, account for containers naturally expiring and preempt
-    only this percentage of the delta. This determines the rate of geometric convergence into
-    the deadzone (MAX_IGNORED_OVER_CAPACITY). For example, a termination factor of 0.5 will reclaim
-    almost 95% of resources within 5 * #WAIT_TIME_BEFORE_KILL, even absent natural termination.
-    </description>
-    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
-    <value>0.2</value>
-  </property>
-
   
 
   



[39/50] [abbrv] hadoop git commit: HDFS-8496. Calling stopWriter() with FSDatasetImpl lock held may block other threads (cmccabe)

2016-04-05 Thread aengineer
HDFS-8496. Calling stopWriter() with FSDatasetImpl lock held may block other 
threads (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6b1a818
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6b1a818
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6b1a818

Branch: refs/heads/HDFS-1312
Commit: f6b1a818124cc42688c4c5acaf537d96cf00e43b
Parents: f65f5b1
Author: Colin Patrick Mccabe 
Authored: Mon Apr 4 18:00:26 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 4 18:02:15 2016 -0700

--
 .../hdfs/server/datanode/ReplicaInPipeline.java |  54 ---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 145 +--
 .../datanode/fsdataset/impl/ReplicaMap.java |   2 +-
 .../hdfs/server/datanode/TestBlockRecovery.java | 137 --
 4 files changed, 257 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6b1a818/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index 5caca15..7326846 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -22,6 +22,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -44,7 +45,7 @@ public class ReplicaInPipeline extends ReplicaInfo
   private long bytesAcked;
   private long bytesOnDisk;
   private byte[] lastChecksum;  
-  private Thread writer;
-  private AtomicReference<Thread> writer = new AtomicReference<Thread>();
 
   /**
* Bytes reserved for this replica on the containing volume.
@@ -97,7 +98,7 @@ public class ReplicaInPipeline extends ReplicaInfo
 super( blockId, len, genStamp, vol, dir);
 this.bytesAcked = len;
 this.bytesOnDisk = len;
-this.writer = writer;
+this.writer.set(writer);
 this.bytesReserved = bytesToReserve;
 this.originalBytesReserved = bytesToReserve;
   }
@@ -110,7 +111,7 @@ public class ReplicaInPipeline extends ReplicaInfo
 super(from);
 this.bytesAcked = from.getBytesAcked();
 this.bytesOnDisk = from.getBytesOnDisk();
-this.writer = from.writer;
+this.writer.set(from.writer.get());
 this.bytesReserved = from.bytesReserved;
 this.originalBytesReserved = from.originalBytesReserved;
   }
@@ -175,18 +176,11 @@ public class ReplicaInPipeline extends ReplicaInfo
 return new ChunkChecksum(getBytesOnDisk(), lastChecksum);
   }
 
-  /**
-   * Set the thread that is writing to this replica
-   * @param writer a thread writing to this replica
-   */
-  public void setWriter(Thread writer) {
-this.writer = writer;
-  }
-  
   public void interruptThread() {
-if (writer != null && writer != Thread.currentThread() 
-&& writer.isAlive()) {
-  this.writer.interrupt();
+Thread thread = writer.get();
+if (thread != null && thread != Thread.currentThread() 
+&& thread.isAlive()) {
+  thread.interrupt();
 }
   }
 
@@ -196,17 +190,35 @@ public class ReplicaInPipeline extends ReplicaInfo
   }
   
   /**
+   * Attempt to set the writer to a new value.
+   */
+  public boolean attemptToSetWriter(Thread prevWriter, Thread newWriter) {
+return writer.compareAndSet(prevWriter, newWriter);
+  }
+
+  /**
* Interrupt the writing thread and wait until it dies
* @throws IOException the waiting is interrupted
*/
   public void stopWriter(long xceiverStopTimeout) throws IOException {
-if (writer != null && writer != Thread.currentThread() && 
writer.isAlive()) {
-  writer.interrupt();
+while (true) {
+  Thread thread = writer.get();
+  if ((thread == null) || (thread == Thread.currentThread()) ||
+  (!thread.isAlive())) {
+if (writer.compareAndSet(thread, null) == true) {
+  return; // Done
+}
+// The writer changed.  Go back to the start of the loop and attempt to
+// stop the new writer.
+continue;
+  }
+  thread.interrupt();
   try {
-writer.join(xceiverStopTimeout);
-if (writer.isAlive()) {
-  final String msg = "Join on writer thread " + writer + " timed out";
-
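
The heart of this change is replacing a bare Thread field with an AtomicReference so stopWriter() can race safely with concurrent writer replacement: it clears the slot only if it still holds the thread it just examined, and otherwise re-reads and retries. The same pattern in isolation (a generic sketch, not the committed code):

  import java.util.concurrent.atomic.AtomicReference;

  // Generic sketch of the retry-on-CAS-failure pattern used by stopWriter().
  final class WriterSlot {
    private final AtomicReference<Thread> owner =
        new AtomicReference<Thread>();

    void set(Thread t) { owner.set(t); }

    void stop(long joinTimeoutMs) throws InterruptedException {
      while (true) {
        Thread t = owner.get();
        if (t == null || t == Thread.currentThread() || !t.isAlive()) {
          if (owner.compareAndSet(t, null)) {
            return; // slot cleared while it still held what we observed
          }
          continue; // another writer was installed concurrently; re-examine
        }
        t.interrupt();
        // Bounded wait, then loop to re-check liveness. (The real code
        // gives up with an IOException once the join times out.)
        t.join(joinTimeoutMs);
      }
    }
  }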

[26/50] [abbrv] hadoop git commit: Missing file for YARN-4895.

2016-04-05 Thread aengineer
Missing file for YARN-4895.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5686caa9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5686caa9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5686caa9

Branch: refs/heads/HDFS-1312
Commit: 5686caa9fcb59759c9286385575f31e407a97c16
Parents: 82621e3
Author: Arun Suresh 
Authored: Fri Apr 1 15:58:13 2016 -0700
Committer: Arun Suresh 
Committed: Fri Apr 1 15:58:13 2016 -0700

--
 .../api/records/TestResourceUtilization.java| 63 
 1 file changed, 63 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5686caa9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java
new file mode 100644
index 000..5934846
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/TestResourceUtilization.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestResourceUtilization {
+
+  @Test
+  public void testResourceUtilization() {
+ResourceUtilization u1 = ResourceUtilization.newInstance(10, 20, 0.5f);
+ResourceUtilization u2 = ResourceUtilization.newInstance(u1);
+ResourceUtilization u3 = ResourceUtilization.newInstance(10, 20, 0.5f);
+ResourceUtilization u4 = ResourceUtilization.newInstance(20, 20, 0.5f);
+ResourceUtilization u5 = ResourceUtilization.newInstance(30, 40, 0.8f);
+
+Assert.assertEquals(u1, u2);
+Assert.assertEquals(u1, u3);
+Assert.assertNotEquals(u1, u4);
+Assert.assertNotEquals(u2, u5);
+Assert.assertNotEquals(u4, u5);
+
+Assert.assertTrue(u1.hashCode() == u2.hashCode());
+Assert.assertTrue(u1.hashCode() == u3.hashCode());
+Assert.assertFalse(u1.hashCode() == u4.hashCode());
+Assert.assertFalse(u2.hashCode() == u5.hashCode());
+Assert.assertFalse(u4.hashCode() == u5.hashCode());
+
+Assert.assertTrue(u1.getPhysicalMemory() == 10);
+Assert.assertFalse(u1.getVirtualMemory() == 10);
+Assert.assertTrue(u1.getCPU() == 0.5f);
+
+Assert.assertEquals("", u1.toString());
+
+u1.addTo(10, 0, 0.0f);
+Assert.assertNotEquals(u1, u2);
+Assert.assertEquals(u1, u4);
+u1.addTo(10, 20, 0.3f);
+Assert.assertEquals(u1, u5);
+u1.subtractFrom(10, 20, 0.3f);
+Assert.assertEquals(u1, u4);
+u1.subtractFrom(10, 0, 0.0f);
+Assert.assertEquals(u1, u3);
+  }
+}
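
Worth noting when reading the assertions: addTo and subtractFrom mutate the receiver in place, which is why u1 walks through equality with u4 and u5 and then back to u3. A compressed usage view (a sketch under that reading of the API):

  import org.apache.hadoop.yarn.api.records.ResourceUtilization;

  final class UtilizationSketch {
    public static void main(String[] args) {
      // addTo/subtractFrom modify the instance rather than returning a copy.
      ResourceUtilization u = ResourceUtilization.newInstance(10, 20, 0.5f);
      u.addTo(10, 0, 0.0f);        // pmem 10 -> 20 (equal to u4 in the test)
      u.addTo(10, 20, 0.3f);       // pmem 30, vmem 40, vCores 0.8 (u5)
      u.subtractFrom(10, 20, 0.3f);
      u.subtractFrom(10, 0, 0.0f); // back to the original values
    }
  }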



[14/50] [abbrv] hadoop git commit: HADOOP-12916. Allow RPC scheduler/callqueue backoff using response times. Contributed by Xiaoyu Yao.

2016-04-05 Thread aengineer
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d95c6eb3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 99bfc61..2ebd1c5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -43,8 +43,10 @@ import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
 import org.apache.hadoop.test.MockitoUtil;
+import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -956,7 +958,7 @@ public class TestRPC extends TestRpcBase {
   }
 
   /**
-   *  Test RPC backoff.
+   *  Test RPC backoff by queue full.
*/
   @Test (timeout=30000)
   public void testClientBackOff() throws Exception {
@@ -969,7 +971,7 @@ public class TestRPC extends TestRpcBase {
 final ExecutorService executorService =
 Executors.newFixedThreadPool(numClients);
 conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
-conf.setBoolean(CommonConfigurationKeys.IPC_CALLQUEUE_NAMESPACE +
+conf.setBoolean(CommonConfigurationKeys.IPC_NAMESPACE +
 ".0." + CommonConfigurationKeys.IPC_BACKOFF_ENABLE, true);
 RPC.Builder builder = newServerBuilder(conf)
 .setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true);
@@ -1019,6 +1021,92 @@ public class TestRPC extends TestRpcBase {
   }
 
   /**
+   *  Test RPC backoff by response time of each priority level.
+   */
+  @Test (timeout=30000)
+  public void testClientBackOffByResponseTime() throws Exception {
+Server server;
+final TestRpcService proxy;
+boolean succeeded = false;
+final int numClients = 1;
+final int queueSizePerHandler = 3;
+
+GenericTestUtils.setLogLevel(DecayRpcScheduler.LOG, Level.DEBUG);
+GenericTestUtils.setLogLevel(RPC.LOG, Level.DEBUG);
+
+final List<Future<Void>> res = new ArrayList<Future<Void>>();
+final ExecutorService executorService =
+Executors.newFixedThreadPool(numClients);
+conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+final String ns = CommonConfigurationKeys.IPC_NAMESPACE + ".0.";
+conf.setBoolean(ns + CommonConfigurationKeys.IPC_BACKOFF_ENABLE, true);
+conf.setStrings(ns + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY,
+"org.apache.hadoop.ipc.FairCallQueue");
+conf.setStrings(ns + CommonConfigurationKeys.IPC_SCHEDULER_IMPL_KEY,
+"org.apache.hadoop.ipc.DecayRpcScheduler");
+conf.setInt(ns + CommonConfigurationKeys.IPC_SCHEDULER_PRIORITY_LEVELS_KEY,
+2);
+conf.setBoolean(ns +
+DecayRpcScheduler.IPC_DECAYSCHEDULER_BACKOFF_RESPONSETIME_ENABLE_KEY,
+true);
+// set a small thresholds 2s and 4s for level 0 and level 1 for testing
+conf.set(ns +
+
DecayRpcScheduler.IPC_DECAYSCHEDULER_BACKOFF_RESPONSETIME_THRESHOLDS_KEY
+, "2s, 4s");
+
+// Set max queue size to 3 so that 2 calls from the test won't trigger
+// back off because the queue is full.
+RPC.Builder builder = newServerBuilder(conf)
+.setQueueSizePerHandler(queueSizePerHandler).setNumHandlers(1)
+.setVerbose(true);
+server = setupTestServer(builder);
+
+@SuppressWarnings("unchecked")
+CallQueueManager<Call> spy = spy((CallQueueManager<Call>) Whitebox
+.getInternalState(server, "callQueue"));
+Whitebox.setInternalState(server, "callQueue", spy);
+
+Exception lastException = null;
+proxy = getClient(addr, conf);
+try {
+  // start a sleep RPC call that sleeps 3s.
+  for (int i = 0; i < numClients; i++) {
+res.add(executorService.submit(
+new Callable<Void>() {
+  @Override
+  public Void call() throws ServiceException, InterruptedException 
{
+proxy.sleep(null, newSleepRequest(3000));
+return null;
+  }
+}));
+verify(spy, timeout(500).times(i + 1)).offer(Mockito.<Call>anyObject());
+  }
+  // Start another sleep RPC call and verify the call is backed off due to
+  // avg response time(3s) exceeds threshold (2s).
+  try {
+// wait for the 1st response time update
+Thread.sleep(5500);
+proxy.sleep(null, newSleepRequest(100));
+  } catch (ServiceException e) {
+RemoteException re = (RemoteException) e.getCause();
+IOException unwrapExeption = re.unwrapRemoteException()

[37/50] [abbrv] hadoop git commit: YARN-4706. UI Hosting Configuration in TimelineServer doc is broken. (Akira AJISAKA via gtcarrera9)

2016-04-05 Thread aengineer
YARN-4706. UI Hosting Configuration in TimelineServer doc is broken. (Akira 
AJISAKA via gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f61de417
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f61de417
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f61de417

Branch: refs/heads/HDFS-1312
Commit: f61de4173684aa1767cef20b3cb4d54df20273cd
Parents: a7d1fb0
Author: Li Lu 
Authored: Mon Apr 4 14:39:47 2016 -0700
Committer: Li Lu 
Committed: Mon Apr 4 14:40:27 2016 -0700

--
 .../hadoop-yarn-site/src/site/markdown/TimelineServer.md   | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f61de417/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 9283e58..f20bd2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -56,6 +56,7 @@ With the introduction of the timeline server, the Application 
History Server bec
 the Timeline Server.
 
 Generic information includes application level data such as 
+
 * queue-name, 
 * user information and the like set in the `ApplicationSubmissionContext`,
 * a list of application-attempts that ran for an application
@@ -192,6 +193,7 @@ selected if this policy is `HTTPS_ONLY`.
  UI Hosting Configuration
 
 The timeline service can host multiple UIs if enabled. The service can support 
both static web sites hosted in a directory or war files bundled. The web UI is 
then hosted on the timeline service HTTP port under the path configured.
+
 | Configuration Property | Description |
 |: |: |
 | `yarn.timeline-service.ui-names` | Comma separated list of UIs that will be 
hosted. Defaults to `none`. |



[49/50] [abbrv] hadoop git commit: YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi via iwasakims)

2016-04-05 Thread aengineer
YARN-4917. Fix typos in documentation of Capacity Scheduler. (Takashi Ohnishi 
via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/500e5a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/500e5a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/500e5a59

Branch: refs/heads/HDFS-1312
Commit: 500e5a5952f8f34bf0e1e2653fa01b357d68cc8f
Parents: 3020634
Author: Masatake Iwasaki 
Authored: Wed Apr 6 04:00:31 2016 +0900
Committer: Masatake Iwasaki 
Committed: Wed Apr 6 04:00:31 2016 +0900

--
 .../src/site/markdown/CapacityScheduler.md| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/500e5a59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index e86c4f9..8c0b8c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -55,11 +55,11 @@ The `CapacityScheduler` supports the following features:
 
 * **Hierarchical Queues** - Hierarchy of queues is supported to ensure 
resources are shared among the sub-queues of an organization before other 
queues are allowed to use free resources, there-by providing more control and 
predictability.
 
-* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Adminstrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
+* **Capacity Guarantees** - Queues are allocated a fraction of the capacity of 
the grid in the sense that a certain capacity of resources will be at their 
disposal. All applications submitted to a queue will have access to the 
capacity allocated to the queue. Administrators can configure soft limits and 
optional hard limits on the capacity allocated to each queue.
 
 * **Security** - Each queue has strict ACLs which controls which users can 
submit applications to individual queues. Also, there are safe-guards to ensure 
that users cannot view and/or modify applications from other users. Also, 
per-queue and system administrator roles are supported.
 
-* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artifical silos of resources in the cluster which helps utilization.
+* **Elasticity** - Free resources can be allocated to any queue beyond its 
capacity. When there is demand for these resources from queues running below 
capacity at a future point in time, as tasks scheduled on these resources 
complete, they will be assigned to applications on queues running below the 
capacity (pre-emption is also supported). This ensures that resources are 
available in a predictable and elastic manner to queues, thus preventing 
artificial silos of resources in the cluster which helps utilization.
 
 * **Multi-tenancy** - Comprehensive set of limits are provided to prevent a 
single application, user and queue from monopolizing resources of the queue or 
the cluster as a whole to ensure that the cluster isn't overwhelmed.
 
@@ -67,9 +67,9 @@ The `CapacityScheduler` supports the following features:
 
 * Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime.
 
-* Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new applications cannot be 
submitted to *itself* or *any of its child queueus*. Existing applications 
continue to completion, thus the queue can be *drain

[19/50] [abbrv] hadoop git commit: YARN-4634. Scheduler UI/Metrics need to consider cases like non-queue label mappings. (Sunil G via wangda)

2016-04-05 Thread aengineer
YARN-4634. Scheduler UI/Metrics need to consider cases like non-queue label 
mappings. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12b11e2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12b11e2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12b11e2e

Branch: refs/heads/HDFS-1312
Commit: 12b11e2e688158404feeb3ded37eb6cccad4ea5c
Parents: 0dd9bca
Author: Wangda Tan 
Authored: Thu Mar 31 14:32:37 2016 -0700
Committer: Wangda Tan 
Committed: Thu Mar 31 14:35:18 2016 -0700

--
 .../webapp/CapacitySchedulerPage.java   | 16 ++--
 1 file changed, 14 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b11e2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 033152a..5abc250 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -405,8 +405,20 @@ class CapacitySchedulerPage extends RmView {
 CapacitySchedulerInfo sinfo = new CapacitySchedulerInfo(root, cs);
 csqinfo.csinfo = sinfo;
 
-if (null == nodeLabelsInfo || (nodeLabelsInfo.size() == 1
-&& nodeLabelsInfo.get(0).getLabelName().isEmpty())) {
+boolean hasAnyLabelLinkedToNM = false;
+if (null != nodeLabelsInfo) {
+  for (RMNodeLabel label : nodeLabelsInfo) {
+if (label.getLabelName().length() == 0) {
+  // Skip DEFAULT_LABEL
+  continue;
+}
+if (label.getNumActiveNMs() > 0) {
+  hasAnyLabelLinkedToNM = true;
+  break;
+}
+  }
+}
+if (!hasAnyLabelLinkedToNM) {
   used = sinfo.getUsedCapacity() / 100;
   //label is not enabled in the cluster or there's only "default" 
label,
   ul.li().



[46/50] [abbrv] hadoop git commit: YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss.

2016-04-05 Thread aengineer
YARN-4916. TestNMProxy.tesNMProxyRPCRetry fails. Contributed by Tibor Kiss.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00058167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00058167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00058167

Branch: refs/heads/HDFS-1312
Commit: 00058167431475c6e63c80207424f1d365569e3a
Parents: 9174645
Author: Junping Du 
Authored: Tue Apr 5 09:01:08 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 09:01:08 2016 -0700

--
 .../yarn/server/nodemanager/containermanager/TestNMProxy.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00058167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
index 7ce15c5..46b32de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestNMProxy.java
@@ -188,8 +188,7 @@ public class TestNMProxy extends BaseContainerManagerTest {
   Assert.fail("should get socket exception");
 } catch (IOException e) {
   // socket exception should be thrown immediately, without RPC retries.
-  Assert.assertTrue(e.toString().
-  contains("Failed on local exception: java.net.SocketException"));
+  Assert.assertTrue(e instanceof java.net.SocketException);
 }
   }
 



[22/50] [abbrv] hadoop git commit: HADOOP-11661. Deprecate FileUtil#copyMerge. Contributed by Brahma Reddy Battula.

2016-04-05 Thread aengineer
HADOOP-11661. Deprecate FileUtil#copyMerge. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8d8b80a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8d8b80a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8d8b80a

Branch: refs/heads/HDFS-1312
Commit: a8d8b80a205c78bf0aa65d32a6a83c16d3ea3f0b
Parents: aac4d65
Author: Akira Ajisaka 
Authored: Fri Apr 1 13:59:14 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Apr 1 13:59:14 2016 +0900

--
 .../hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java  | 1 +
 .../src/test/java/org/apache/hadoop/fs/TestFileUtil.java| 1 +
 2 files changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8d8b80a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index e74c41c..b855c48 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -381,6 +381,7 @@ public class FileUtil {
 
   }
 
+  @Deprecated
   /** Copy all files in a directory to one output file (merge). */
   public static boolean copyMerge(FileSystem srcFS, Path srcDir,
   FileSystem dstFS, Path dstFile,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8d8b80a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index 5fc0b2d..f7464b7 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -559,6 +559,7 @@ public class TestFileUtil {
* @return boolean true if the call to FileUtil.copyMerge was successful.
* @throws IOException if an I/O error occurs.
*/
+  @SuppressWarnings("deprecation")
   private boolean copyMerge(String src, String dst)
   throws IOException {
 Configuration conf = new Configuration();



[38/50] [abbrv] hadoop git commit: HADOOP-12959. Add additional github web site for ISA-L library (Li Bo via cmccabe)

2016-04-05 Thread aengineer
HADOOP-12959. Add additional github web site for ISA-L library (Li Bo via 
cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f65f5b18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f65f5b18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f65f5b18

Branch: refs/heads/HDFS-1312
Commit: f65f5b18fd4647e868b8d2a2c035a3b64dc16aa8
Parents: f61de41
Author: Colin Patrick Mccabe 
Authored: Mon Apr 4 16:30:32 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Mon Apr 4 16:30:32 2016 -0700

--
 BUILDING.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f65f5b18/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 408cae1..c7a91da 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -75,6 +75,7 @@ Optional packages:
   $ sudo apt-get install snappy libsnappy-dev
 * Intel ISA-L library for erasure coding
   Please refer to 
https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
+  (OR https://github.com/01org/isa-l)
 * Bzip2
   $ sudo apt-get install bzip2 libbz2-dev
 * Jansson (C Library for JSON)
@@ -188,11 +189,12 @@ Maven build goals:
 
  Intel ISA-L build options:
 
-   Intel ISA-L is a erasure coding library that can be utilized by the native 
code.
+   Intel ISA-L is an erasure coding library that can be utilized by the native 
code.
It is currently an optional component, meaning that Hadoop can be built with
or without this dependency. Note the library is used via dynamic module. 
Please
reference the official site for the library details.
https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
+   (OR https://github.com/01org/isa-l)
 
   * Use -Drequire.isal to fail the build if libisal.so is not found.
 If this option is not specified and the isal library is missing,



[31/50] [abbrv] hadoop git commit: HADOOP-12967. Remove FileUtil#copyMerge. Contributed by Brahma Reddy Battula.

2016-04-05 Thread aengineer
HADOOP-12967. Remove FileUtil#copyMerge. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da614ca5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da614ca5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da614ca5

Branch: refs/heads/HDFS-1312
Commit: da614ca5dc26562d7ecd5d7c5743fa52c3c17342
Parents: 1e6f929
Author: Akira Ajisaka 
Authored: Mon Apr 4 17:46:56 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Apr 4 17:48:08 2016 +0900

--
 .../java/org/apache/hadoop/fs/FileUtil.java | 42 --
 .../java/org/apache/hadoop/fs/TestFileUtil.java | 58 
 2 files changed, 100 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da614ca5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index b855c48..e2d6ecd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -23,7 +23,6 @@ import java.net.InetAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Enumeration;
 import java.util.List;
 import java.util.Map;
@@ -381,47 +380,6 @@ public class FileUtil {
 
   }
 
-  @Deprecated
-  /** Copy all files in a directory to one output file (merge). */
-  public static boolean copyMerge(FileSystem srcFS, Path srcDir,
-  FileSystem dstFS, Path dstFile,
-  boolean deleteSource,
-  Configuration conf, String addString) throws 
IOException {
-dstFile = checkDest(srcDir.getName(), dstFS, dstFile, false);
-
-if (!srcFS.getFileStatus(srcDir).isDirectory())
-  return false;
-
-OutputStream out = dstFS.create(dstFile);
-
-try {
-  FileStatus contents[] = srcFS.listStatus(srcDir);
-  Arrays.sort(contents);
-  for (int i = 0; i < contents.length; i++) {
-if (contents[i].isFile()) {
-  InputStream in = srcFS.open(contents[i].getPath());
-  try {
-IOUtils.copyBytes(in, out, conf, false);
-if (addString!=null)
-  out.write(addString.getBytes("UTF-8"));
-
-  } finally {
-in.close();
-  }
-}
-  }
-} finally {
-  out.close();
-}
-
-
-if (deleteSource) {
-  return srcFS.delete(srcDir, true);
-} else {
-  return true;
-}
-  }
-
   /** Copy local files to a FileSystem. */
   public static boolean copy(File src,
  FileSystem dstFS, Path dst,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da614ca5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
index f7464b7..a9ef5c0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
@@ -19,11 +19,9 @@ package org.apache.hadoop.fs;
 
 import org.junit.*;
 
-import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
-import java.io.FileReader;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.net.InetAddress;
@@ -49,7 +47,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.tools.tar.TarEntry;
 import org.apache.tools.tar.TarOutputStream;
 
-import javax.print.attribute.URISyntax;
 
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
@@ -526,61 +523,6 @@ public class TestFileUtil {
 validateAndSetWritablePermissions(false, ret);
   }
   
-  @Test (timeout = 30000)
-  public void testCopyMergeSingleDirectory() throws IOException {
-setupDirs();
-boolean copyMergeResult = copyMerge("partitioned", "tmp/merged");
-Assert.assertTrue("Expected successful copyMerge result.", 
copyMergeResult);
-File merged = new File(TEST_DIR, "tmp/merged");
-Assert.assertTrue("File tmp/merged must exist after copyMerge.",
-merged.exists());
-BufferedReader rdr = new BufferedReader(new FileReader(merged));
-
-try {
-  A
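
Since copyMerge is removed with no direct replacement, callers that relied on it need their own merge loop. A minimal equivalent, adapted from the deleted method above (try-with-resources form; the private checkDest() validation is omitted here, so destination checking is the caller's responsibility):

  import java.io.IOException;
  import java.io.InputStream;
  import java.io.OutputStream;
  import java.util.Arrays;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IOUtils;

  final class CopyMergeSketch {
    // Adapted from the removed FileUtil.copyMerge: concatenates every file
    // in srcDir (sorted) into dstFile, optionally appending addString after
    // each file's contents.
    static boolean copyMerge(FileSystem srcFS, Path srcDir, FileSystem dstFS,
        Path dstFile, boolean deleteSource, Configuration conf,
        String addString) throws IOException {
      if (!srcFS.getFileStatus(srcDir).isDirectory()) {
        return false;
      }
      try (OutputStream out = dstFS.create(dstFile)) {
        FileStatus[] contents = srcFS.listStatus(srcDir);
        Arrays.sort(contents); // keep the deterministic merge order
        for (FileStatus status : contents) {
          if (status.isFile()) {
            try (InputStream in = srcFS.open(status.getPath())) {
              IOUtils.copyBytes(in, out, conf, false);
              if (addString != null) {
                out.write(addString.getBytes("UTF-8"));
              }
            }
          }
        }
      }
      return deleteSource ? srcFS.delete(srcDir, true) : true;
    }
  }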

[44/50] [abbrv] hadoop git commit: YARN-4893. Fix some intermittent test failures in TestRMAdminService. Contributed by Brahma Reddy Battula.

2016-04-05 Thread aengineer
YARN-4893. Fix some intermittent test failures in TestRMAdminService. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6be28bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6be28bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6be28bcc

Branch: refs/heads/HDFS-1312
Commit: 6be28bcc461292b24589dae17a235b3eaadc07ed
Parents: 1cbcd4a
Author: Junping Du 
Authored: Tue Apr 5 06:57:26 2016 -0700
Committer: Junping Du 
Committed: Tue Apr 5 06:57:54 2016 -0700

--
 .../org/apache/hadoop/yarn/server/resourcemanager/MockRM.java | 7 +--
 .../yarn/server/resourcemanager/TestRMAdminService.java   | 3 ---
 .../hadoop/yarn/server/resourcemanager/TestRMRestart.java | 2 --
 .../server/resourcemanager/TestResourceTrackerService.java| 6 --
 .../server/resourcemanager/rmapp/TestNodesListManager.java| 5 ++---
 5 files changed, 7 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be28bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index d5b64c1..25c558f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -56,12 +56,12 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationContext;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.SignalContainerCommand;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -603,6 +603,7 @@ public class MockRM extends ResourceManager {
   public MockNM registerNode(String nodeIdStr, int memory) throws Exception {
 MockNM nm = new MockNM(nodeIdStr, memory, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
 
@@ -611,6 +612,7 @@ public class MockRM extends ResourceManager {
 MockNM nm =
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService());
 nm.registerNode();
+drainEvents();
 return nm;
   }
   
@@ -620,6 +622,7 @@ public class MockRM extends ResourceManager {
 new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService(),
 YarnVersionInfo.getVersion());
 nm.registerNode(runningApplications);
+drainEvents();
 return nm;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be28bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index 4513cbb..5c69411 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -27,9 +27,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.util.ArrayList;

[28/50] [abbrv] hadoop git commit: HDFS-10253. Fix TestRefreshCallQueue failure (Contributed by Xiaoyu Yao)

2016-04-05 Thread aengineer
HDFS-10253. Fix TestRefreshCallQueue failure (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54b2e78f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54b2e78f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54b2e78f

Branch: refs/heads/HDFS-1312
Commit: 54b2e78fd28c9def42bec7f0418833bad352686c
Parents: 81d04ca
Author: Vinayakumar B 
Authored: Sun Apr 3 13:27:49 2016 +0530
Committer: Vinayakumar B 
Committed: Sun Apr 3 13:27:49 2016 +0530

--
 .../src/test/java/org/apache/hadoop/TestRefreshCallQueue.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54b2e78f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
index 1be2752..5cb7def 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
@@ -92,7 +92,7 @@ public class TestRefreshCallQueue {
 
   @SuppressWarnings("serial")
   public static class MockCallQueue<E> extends LinkedBlockingQueue<E> {
-public MockCallQueue(int cap, String ns, Configuration conf) {
+public MockCallQueue(int levels, int cap, String ns, Configuration conf) {
   super(cap);
   mockQueueConstructions++;
 }



[35/50] [abbrv] hadoop git commit: HDFS-9599. TestDecommissioningStatus.testDecommissionStatus occasionally fails (Lin Yiqun via iwasakims)

2016-04-05 Thread aengineer
HDFS-9599. TestDecommissioningStatus.testDecommissionStatus occasionally fails 
(Lin Yiqun via iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/154d2532
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/154d2532
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/154d2532

Branch: refs/heads/HDFS-1312
Commit: 154d2532cf015e9ab9141864bd3ab0d6100ef597
Parents: 7280550
Author: Masatake Iwasaki 
Authored: Tue Apr 5 03:19:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Tue Apr 5 03:19:48 2016 +0900

--
 .../hdfs/server/namenode/TestDecommissioningStatus.java | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/154d2532/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 789ee6f..1e7312a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -57,8 +57,8 @@ import 
org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -78,8 +78,8 @@ public class TestDecommissioningStatus {
 
  final ArrayList<String> decommissionedNodes = new ArrayList<String>(numDatanodes);
   
-  @BeforeClass
-  public static void setUp() throws Exception {
+  @Before
+  public void setUp() throws Exception {
 conf = new HdfsConfiguration();
 conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
 false);
@@ -113,8 +113,8 @@ public class TestDecommissioningStatus {
 Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
 if (localFileSys != null ) cleanupFile(localFileSys, dir);
 if(fileSys != null) fileSys.close();
 if(cluster != null) cluster.shutdown();



[33/50] [abbrv] hadoop git commit: HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning an empty list. author: Pieter Reuse. - omitted new S3A subclass

2016-04-05 Thread aengineer
HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning 
an empty list. author: Pieter Reuse. - omitted new S3A subclass


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89c93475
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89c93475
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89c93475

Branch: refs/heads/HDFS-1312
Commit: 89c93475116ee475645cf81cc80f155f830e61de
Parents: 5092c94
Author: Steve Loughran 
Authored: Mon Apr 4 17:00:35 2016 +0100
Committer: Steve Loughran 
Committed: Mon Apr 4 17:02:04 2016 +0100

--
 .../s3a/TestS3AContractGetFileStatus.java   | 31 
 1 file changed, 31 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89c93475/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
new file mode 100644
index 000..d7b8fe3
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/TestS3AContractGetFileStatus.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.contract.s3a;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
+
+public class TestS3AContractGetFileStatus extends 
AbstractContractGetFileStatusTest {
+
+  @Override
+  protected AbstractFSContract createContract(Configuration conf) {
+return new S3AContract(conf);
+  }
+
+}
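
For context, the contract-test framework pairs one abstract test class per filesystem operation with a per-filesystem "contract" declaring which behaviors the store supports, so a three-line subclass like the one above is enough to run the whole shared suite against another store. A minimal sketch of the same binding for the local filesystem (hypothetical test class; assumes hadoop-common's LocalFSContract test helper):

package org.apache.hadoop.fs.contract.localfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
import org.apache.hadoop.fs.contract.AbstractFSContract;

public class TestLocalFSContractGetFileStatus
    extends AbstractContractGetFileStatusTest {

  @Override
  protected AbstractFSContract createContract(Configuration conf) {
    // The contract object binds the shared tests to a concrete filesystem.
    return new LocalFSContract(conf);
  }
}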



[41/50] [abbrv] hadoop git commit: YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws NPE. Contributed by Sunil G

2016-04-05 Thread aengineer
YARN-4880. Running TestZKRMStateStorePerf with real zookeeper cluster throws 
NPE. Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/552237d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/552237d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/552237d4

Branch: refs/heads/HDFS-1312
Commit: 552237d4a34ab10fa5f9ec7aad7942f2a110993e
Parents: 818d6b7
Author: Rohith Sharma K S 
Authored: Tue Apr 5 14:25:32 2016 +0530
Committer: Rohith Sharma K S 
Committed: Tue Apr 5 14:26:19 2016 +0530

--
 .../resourcemanager/recovery/TestZKRMStateStorePerf.java| 9 ++---
 1 file changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/552237d4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
index 4b0b06a..bd25def 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStorePerf.java
@@ -91,7 +91,9 @@ public class TestZKRMStateStorePerf extends 
RMStateStoreTestBase
 if (appTokenMgr != null) {
   appTokenMgr.stop();
 }
-curatorTestingServer.stop();
+if (curatorTestingServer != null) {
+  curatorTestingServer.stop();
+}
   }
 
   private void initStore(String hostPort) {
@@ -99,8 +101,9 @@ public class TestZKRMStateStorePerf extends 
RMStateStoreTestBase
 RMContext rmContext = mock(RMContext.class);
 
 conf = new YarnConfiguration();
-conf.set(YarnConfiguration.RM_ZK_ADDRESS,
-optHostPort.or(curatorTestingServer.getConnectString()));
+conf.set(YarnConfiguration.RM_ZK_ADDRESS, optHostPort
+.or((curatorTestingServer == null) ? "" : curatorTestingServer
+.getConnectString()));
 conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
 
 store = new ZKRMStateStore();
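
The NPE fixed above came from assuming the embedded Curator TestingServer is always started; when the perf test is pointed at a real ZooKeeper cluster the field stays null. A minimal sketch of the same guard pattern in isolation (hypothetical OptionalTestServer class; assumes Curator's TestingServer and the Guava Optional the test already uses):

import com.google.common.base.Optional;
import org.apache.curator.test.TestingServer;

public class OptionalTestServer {
  private TestingServer server; // stays null when a real cluster is supplied

  String connectString(Optional<String> realCluster) {
    // Fall back to the embedded server only when it actually exists.
    return realCluster.or(
        (server == null) ? "" : server.getConnectString());
  }

  void shutdown() throws Exception {
    if (server != null) { // nothing to stop in real-cluster mode
      server.stop();
    }
  }
}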



[40/50] [abbrv] hadoop git commit: HDFS-9917. IBRs accumulate more objects when the SNN is down for some time. (Contributed by Brahma Reddy Battula)

2016-04-05 Thread aengineer
HDFS-9917. IBRs accumulate more objects when the SNN is down for some time.
(Contributed by Brahma Reddy Battula)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/818d6b79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/818d6b79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/818d6b79

Branch: refs/heads/HDFS-1312
Commit: 818d6b799eead13a17a0214172df60a269b046fb
Parents: f6b1a81
Author: Vinayakumar B 
Authored: Tue Apr 5 09:49:39 2016 +0800
Committer: Vinayakumar B 
Committed: Tue Apr 5 09:49:39 2016 +0800

--
 .../hdfs/server/datanode/BPServiceActor.java|  5 +
 .../datanode/IncrementalBlockReportManager.java |  9 ++
 .../server/datanode/TestBPOfferService.java | 96 +++-
 3 files changed, 107 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 49f64c2..39f8219 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -798,6 +798,11 @@ class BPServiceActor implements Runnable {
   // and re-register
   register(nsInfo);
   scheduler.scheduleHeartbeat();
+  // HDFS-9917,Standby NN IBR can be very huge if standby namenode is down
+  // for sometime.
+  if (state == HAServiceState.STANDBY) {
+ibrManager.clearIBRs();
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
index b9b348a..e95142d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/IncrementalBlockReportManager.java
@@ -258,4 +258,13 @@ class IncrementalBlockReportManager {
   }
 }
   }
+
+  void clearIBRs() {
+pendingIBRs.clear();
+  }
+
+  @VisibleForTesting
+  int getPendingIBRSize() {
+return pendingIBRs.size();
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/818d6b79/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 95a103e..29db702 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -30,6 +30,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
@@ -48,10 +49,12 @@ import 
org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.serv
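
The fix above bounds DataNode memory by discarding queued incremental block reports once the actor learns its peer NameNode is standby: a standby ignores IBRs, and a full block report after failover resupplies the state anyway. A minimal sketch of the idea, decoupled from the DataNode internals (hypothetical PendingReports class):

import java.util.ArrayList;
import java.util.List;

class PendingReports {
  enum HAState { ACTIVE, STANDBY }

  private final List<String> pending = new ArrayList<String>();

  synchronized void add(String report) {
    pending.add(report);
  }

  // Called after (re-)registration, once the peer's HA state is known.
  synchronized void onRegistered(HAState state) {
    if (state == HAState.STANDBY) {
      pending.clear(); // the standby discards IBRs; avoid unbounded growth
    }
  }

  synchronized int size() {
    return pending.size();
  }
}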

[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-1312

2016-04-05 Thread aengineer
Merge branch 'trunk' into HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48a8c9c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48a8c9c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48a8c9c3

Branch: refs/heads/HDFS-1312
Commit: 48a8c9c3e2cc3b24913a196dcf6b47ba93a07a92
Parents: 12b4cf7 500e5a5
Author: Anu Engineer 
Authored: Tue Apr 5 12:28:48 2016 -0700
Committer: Anu Engineer 
Committed: Tue Apr 5 12:28:48 2016 -0700

--
 BUILDING.txt|6 +-
 dev-support/bin/dist-tools-hooks-maker  |  182 +++
 dev-support/docker/Dockerfile   |8 +
 .../server/AuthenticationFilter.java|   33 +-
 .../JWTRedirectAuthenticationHandler.java   |   16 +-
 .../TestJWTRedirectAuthentictionHandler.java|   37 +-
 .../hadoop-common/src/main/bin/hadoop   |   22 +-
 .../hadoop-common/src/main/bin/hadoop-config.sh |6 +-
 .../hadoop-common/src/main/bin/hadoop-daemon.sh |6 +-
 .../src/main/bin/hadoop-daemons.sh  |6 +-
 .../src/main/bin/hadoop-functions.sh|  127 ++-
 .../src/main/bin/hadoop-layout.sh.example   |   22 +-
 .../hadoop-common/src/main/bin/slaves.sh|6 +-
 .../hadoop-common/src/main/bin/start-all.sh |4 +-
 .../hadoop-common/src/main/bin/stop-all.sh  |4 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |   31 +-
 .../org/apache/hadoop/conf/Configuration.java   |   13 +
 .../crypto/key/kms/KMSClientProvider.java   |8 +-
 .../hadoop/crypto/key/kms/ValueQueue.java   |   16 +-
 .../hadoop/fs/CommonConfigurationKeys.java  |   14 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  126 ++-
 .../java/org/apache/hadoop/fs/FileUtil.java |   41 -
 .../org/apache/hadoop/fs/PathIOException.java   |9 +
 .../hadoop/fs/shell/CommandWithDestination.java |3 +-
 .../apache/hadoop/fs/shell/MoveCommands.java|6 +-
 .../java/org/apache/hadoop/fs/shell/Touch.java  |3 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java |   13 +-
 .../io/erasurecode/rawcoder/RSRawDecoder.java   |  175 +++
 .../io/erasurecode/rawcoder/RSRawDecoder2.java  |  176 ---
 .../io/erasurecode/rawcoder/RSRawEncoder.java   |   75 ++
 .../io/erasurecode/rawcoder/RSRawEncoder2.java  |   76 --
 .../rawcoder/RSRawErasureCoderFactory.java  |   37 +
 .../rawcoder/RSRawErasureCoderFactory2.java |   37 -
 .../io/erasurecode/rawcoder/util/RSUtil.java|  149 ++-
 .../io/erasurecode/rawcoder/util/RSUtil2.java   |  172 ---
 .../org/apache/hadoop/ipc/CallQueueManager.java |  134 ++-
 .../main/java/org/apache/hadoop/ipc/Client.java |  166 +--
 .../apache/hadoop/ipc/DecayRpcScheduler.java|  396 +--
 .../hadoop/ipc/DecayRpcSchedulerMXBean.java |2 +
 .../apache/hadoop/ipc/DefaultRpcScheduler.java  |   45 +
 .../org/apache/hadoop/ipc/FairCallQueue.java|   45 +-
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|8 +-
 .../org/apache/hadoop/ipc/RpcScheduler.java |8 +-
 .../java/org/apache/hadoop/ipc/Schedulable.java |5 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |   77 +-
 .../apache/hadoop/ipc/WritableRpcEngine.java|   47 +-
 .../hadoop/metrics2/lib/MutableQuantiles.java   |7 +-
 .../hadoop/metrics2/util/QuantileEstimator.java |   32 +
 .../hadoop/metrics2/util/SampleQuantiles.java   |2 +-
 .../java/org/apache/hadoop/net/NetUtils.java|   31 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |   17 +-
 .../java/org/apache/hadoop/net/NodeBase.java|   18 +-
 .../apache/hadoop/security/SecurityUtil.java|   53 +-
 .../apache/hadoop/security/ssl/SSLFactory.java  |   42 +-
 .../apache/hadoop/service/AbstractService.java  |2 +-
 .../org/apache/hadoop/tracing/TraceUtils.java   |4 +-
 .../hadoop/util/NativeLibraryChecker.java   |8 +-
 .../main/java/org/apache/hadoop/util/Shell.java |   10 +-
 .../apache/hadoop/util/ShutdownHookManager.java |  116 +-
 .../hadoop/io/compress/lz4/Lz4Compressor.c  |4 +-
 .../src/org/apache/hadoop/io/compress/lz4/lz4.c |2 +-
 .../hadoop/io/erasurecode/erasure_coder.c   |1 +
 .../apache/hadoop/io/erasurecode/isal_load.c|   46 +-
 .../apache/hadoop/io/erasurecode/isal_load.h|6 +-
 .../io/erasurecode/jni_erasure_code_native.c|   11 +-
 .../src/main/resources/core-default.xml |   11 +-
 .../src/site/markdown/ClusterSetup.md   |   40 +-
 .../src/site/markdown/CommandsManual.md |2 +-
 .../src/site/markdown/GroupsMapping.md  |   26 +-
 .../src/site/markdown/HttpAuthentication.md |6 +-
 .../src/site/markdown/UnixShellGuide.md |2 +-
 .../hadoop/crypto/key/TestValueQueue.java   |6 -
 .../java/org/apache/hadoop/fs/TestFileUtil.java |   57 -
 .../org/apache/hadoop/fs/TestFsShellCopy.java   |   52 +-
 .../org/apache/hadoop/fs/T

[29/50] [abbrv] hadoop git commit: HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning an empty list. author: Pieter Reuse.

2016-04-05 Thread aengineer
HADOOP-12169 ListStatus on empty dir in S3A lists itself instead of returning 
an empty list. author: Pieter Reuse.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ecdd4cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ecdd4cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ecdd4cf

Branch: refs/heads/HDFS-1312
Commit: 0ecdd4cffa51e34997321c384496efc249e3d8ff
Parents: 54b2e78
Author: Steve Loughran 
Authored: Sun Apr 3 16:39:14 2016 +0100
Committer: Steve Loughran 
Committed: Sun Apr 3 16:40:19 2016 +0100

--
 .../AbstractContractGetFileStatusTest.java  | 23 
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  7 --
 .../src/test/resources/contract/s3a.xml |  5 +
 3 files changed, 33 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ecdd4cf/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
index 7ed375e..3e5bb12 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
@@ -19,10 +19,14 @@
 package org.apache.hadoop.fs.contract;
 
 import java.io.FileNotFoundException;
+import java.io.IOException;
 
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,4 +62,23 @@ public abstract class AbstractContractGetFileStatusTest 
extends
   handleExpectedException(e);
 }
   }
+
+  @Test
+  public void testListStatusEmptyDirectory() throws IOException {
+// remove the test directory
+FileSystem fs = getFileSystem();
+assertTrue(fs.delete(getContract().getTestPath(), true));
+
+// create a - non-qualified - Path for a subdir
+Path subfolder = 
getContract().getTestPath().suffix("/"+testPath.getName());
+assertTrue(fs.mkdirs(subfolder));
+
+// assert empty ls on the empty dir
+assertEquals("ls on an empty directory not of length 0", 0,
+fs.listStatus(subfolder).length);
+
+// assert non-empty ls on parent dir
+assertTrue("ls on a non-empty directory of length 0",
+fs.listStatus(getContract().getTestPath()).length > 0);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ecdd4cf/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 6afb05d..fe705ce 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -788,11 +788,14 @@ public class S3AFileSystem extends FileSystem {
   ObjectListing objects = s3.listObjects(request);
   statistics.incrementReadOps(1);
 
+  Path fQualified = f.makeQualified(uri, workingDir);
+
   while (true) {
 for (S3ObjectSummary summary : objects.getObjectSummaries()) {
   Path keyPath = keyToPath(summary.getKey()).makeQualified(uri, 
workingDir);
   // Skip over keys that are ourselves and old S3N _$folder$ files
-  if (keyPath.equals(f) || 
summary.getKey().endsWith(S3N_FOLDER_SUFFIX)) {
+  if (keyPath.equals(fQualified) ||
+  summary.getKey().endsWith(S3N_FOLDER_SUFFIX)) {
 if (LOG.isDebugEnabled()) {
   LOG.debug("Ignoring: " + keyPath);
 }
@@ -807,7 +810,7 @@ public class S3AFileSystem extends FileSystem {
   } else {
 result.add(new S3AFileStatus(summary.getSize(),
 dateToLong(summary.getLastModified()), keyPath,
-getDefaultBlockSize(f.makeQualified(uri, workingDir;
+getDefaultBlockSize(fQualified)));
 if (LOG.isDebugEnabled()) {
   LOG.debug("Adding: fi: " + keyPath);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ecdd4cf/hadoop-tools/hadoop-aws/src/test/resources
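
The root cause fixed above was comparing a possibly-unqualified caller Path f against fully qualified listing keys, so the directory's own key never matched and the empty directory "listed itself"; qualifying f once, outside the loop, also avoids requalifying per entry. A minimal sketch of the pitfall (stock Hadoop Path API; the URI and paths are illustrative):

import java.net.URI;
import org.apache.hadoop.fs.Path;

public class QualifyOnce {
  public static void main(String[] args) {
    URI fsUri = URI.create("s3a://bucket");
    Path workingDir = new Path("/user/test");

    Path raw = new Path("dir"); // relative and unqualified
    Path qualified = raw.makeQualified(fsUri, workingDir);

    // What a listing would hand back for the directory itself:
    Path keyPath = new Path("s3a://bucket/user/test/dir");

    System.out.println(keyPath.equals(raw));       // false: no scheme on raw
    System.out.println(keyPath.equals(qualified)); // true: like with like
  }
}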

[20/50] [abbrv] hadoop git commit: HADOOP-12955. Fix bugs in the initialization of the ISA-L library JNI bindings (Kai Zheng via cmccabe)

2016-04-05 Thread aengineer
HADOOP-12955. Fix bugs in the initialization of the ISA-L library JNI bindings 
(Kai Zheng via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19639785
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19639785
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19639785

Branch: refs/heads/HDFS-1312
Commit: 19639785f5e9c483558ce585287b9dda9d626263
Parents: 12b11e2
Author: Colin Patrick Mccabe 
Authored: Thu Mar 31 15:09:11 2016 -0700
Committer: Colin Patrick Mccabe 
Committed: Thu Mar 31 15:09:11 2016 -0700

--
 .../hadoop/util/NativeLibraryChecker.java   |  8 ++--
 .../hadoop/io/erasurecode/erasure_coder.c   |  1 +
 .../apache/hadoop/io/erasurecode/isal_load.c| 46 +---
 .../apache/hadoop/io/erasurecode/isal_load.h|  6 +--
 .../io/erasurecode/jni_erasure_code_native.c| 11 +++--
 5 files changed, 34 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19639785/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
index 46f0897..e166bec 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
@@ -95,12 +95,12 @@ public class NativeLibraryChecker {
 snappyLibraryName = SnappyCodec.getLibraryName();
   }
 
-  try {
-isalDetail = ErasureCodeNative.getLoadingFailureReason();
+  isalDetail = ErasureCodeNative.getLoadingFailureReason();
+  if (isalDetail != null) {
+isalLoaded = false;
+  } else {
 isalDetail = ErasureCodeNative.getLibraryName();
 isalLoaded = true;
-  } catch (UnsatisfiedLinkError e) {
-isalLoaded = false;
   }
 
   openSslDetail = OpensslCipher.getLoadingFailureReason();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19639785/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
index b3479bb..b2d856b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/erasure_coder.c
@@ -19,6 +19,7 @@
 #include "erasure_code.h"
 #include "gf_util.h"
 #include "erasure_coder.h"
+#include "dump.h"
 
 #include 
 #include 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19639785/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
index 55e8efd..26d8e1a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/isal_load.c
@@ -78,6 +78,12 @@ static const char* load_functions() {
 
 void load_erasurecode_lib(char* err, size_t err_len) {
   const char* errMsg;
+  const char* library = NULL;
+#ifdef UNIX
+  Dl_info dl_info;
+#else
+  LPTSTR filename = NULL;
+#endif
 
   err[0] = '\0';
 
@@ -111,38 +117,28 @@ void load_erasurecode_lib(char* err, size_t err_len) {
   if (errMsg != NULL) {
 snprintf(err, err_len, "Loading functions from ISA-L failed: %s", errMsg);
   }
-}
 
-int build_support_erasurecode() {
-#ifdef HADOOP_ISAL_LIBRARY
-  return 1;
-#else
-  return 0;
-#endif
-}
-
-const char* get_library_name() {
 #ifdef UNIX
-  Dl_info dl_info;
-
-  if (isaLoader->ec_encode_data == NULL) {
-return HADOOP_ISAL_LIBRARY;
-  }
-
   if(dladdr(isaLoader->ec_encode_data, &dl_info)) {
-return dl_info.dli_fname;
+library = dl_info.dli_fname;
   }
 #else
-  LPTSTR filename = NULL;
-
-  if (isaLoader->libec == NULL) {
-return HADOOP_ISAL_LIBRARY;
-  }
-
   if (GetModuleFileName(isaLoader->libec, filename, 256) > 0) {
-return filename;
+library = fil
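
The rewrite above works because getLoadingFailureReason() reports a failure rather than throwing: the old try/catch around it could never catch anything, so isalLoaded was set even when the native library was absent. A minimal sketch of the "nullable failure reason" pattern for an arbitrary native library (hypothetical NativeProbe class; the library name is illustrative):

public final class NativeProbe {

  // null means the library loaded; otherwise a human-readable reason.
  static String loadingFailureReason() {
    try {
      System.loadLibrary("exampleisal"); // illustrative library name
      return null;
    } catch (UnsatisfiedLinkError e) {
      return e.getMessage();
    }
  }

  public static void main(String[] args) {
    String reason = loadingFailureReason();
    if (reason != null) {
      System.out.println("native support unavailable: " + reason);
    } else {
      System.out.println("native support loaded");
    }
  }
}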

[32/50] [abbrv] hadoop git commit: YARN-4746. yarn web services should convert parse failures of appId, appAttemptId and containerId to 400. Contributed by Bibin A Chundatt

2016-04-05 Thread aengineer
YARN-4746. yarn web services should convert parse failures of appId, 
appAttemptId and containerId to 400. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5092c941
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5092c941
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5092c941

Branch: refs/heads/HDFS-1312
Commit: 5092c94195a63bd2c3e36d5a74b4c061cea1b847
Parents: da614ca
Author: naganarasimha 
Authored: Mon Apr 4 16:25:03 2016 +0530
Committer: naganarasimha 
Committed: Mon Apr 4 16:25:03 2016 +0530

--
 .../apache/hadoop/yarn/util/ConverterUtils.java | 16 --
 .../hadoop/yarn/webapp/util/WebAppUtils.java| 22 ++
 .../hadoop/yarn/server/webapp/WebServices.java  | 22 +++---
 .../nodemanager/webapp/NMWebServices.java   |  6 ++--
 .../webapp/TestNMWebServicesApps.java   |  9 --
 .../resourcemanager/webapp/RMWebServices.java   | 32 ++--
 .../webapp/TestRMWebServicesApps.java   | 24 +--
 .../TestRMWebServicesAppsModification.java  | 10 --
 8 files changed, 87 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5092c941/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
index e9674cf..acd29fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -122,8 +122,20 @@ public class ConverterUtils {
   public static ApplicationId toApplicationId(RecordFactory recordFactory,
   String appIdStr) {
 Iterator it = _split(appIdStr).iterator();
-it.next(); // prefix. TODO: Validate application prefix
-return toApplicationId(recordFactory, it);
+if (!it.next().equals(APPLICATION_PREFIX)) {
+  throw new IllegalArgumentException("Invalid ApplicationId prefix: "
+  + appIdStr + ". The valid ApplicationId should start with prefix "
+  + APPLICATION_PREFIX);
+}
+try {
+  return toApplicationId(recordFactory, it);
+} catch (NumberFormatException n) {
+  throw new IllegalArgumentException("Invalid ApplicationId: " + appIdStr,
+  n);
+} catch (NoSuchElementException e) {
+  throw new IllegalArgumentException("Invalid ApplicationId: " + appIdStr,
+  e);
+}
   }
 
   private static ApplicationId toApplicationId(RecordFactory recordFactory,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5092c941/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index f8e67ee..faf4a77 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -33,9 +33,14 @@ import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.RMHAUtils;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
 
 @Private
 @Evolving
@@ -378,4 +383,21 @@ public class WebAppUtils {
 }
 return password;
   }
+
+  public static ApplicationId parseApplicationId(RecordFactory recordFactory,
+  String appId) {
+if (appId == null || appId.isEmpty()) {
+  throw new NotFoundException("appId, " + appId + ", is empty or null");
+}
+ApplicationId aid = null;
+try {
+  aid = ConverterUtils.toApplicationId(recordFactory, appId);
+} catch (Exception e) {
+  
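
The pattern running through this patch: a malformed ID in a URL is a client error, so parse failures should surface as HTTP 400 via BadRequestException instead of escaping as a 500. A minimal sketch of the translation step (hypothetical JAX-RS resource; the ID parsing is simplified, but org.apache.hadoop.yarn.webapp.BadRequestException is the class the patch imports):

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import org.apache.hadoop.yarn.webapp.BadRequestException;

@Path("/apps")
public class AppResource {

  @GET
  @Path("/{appid}")
  public String getApp(@PathParam("appid") String appId) {
    try {
      return "clusterTimestamp=" + parseClusterTimestamp(appId);
    } catch (IllegalArgumentException e) {
      // Translate the parse failure into a 400, not a generic 500.
      throw new BadRequestException("Invalid ApplicationId: " + appId);
    }
  }

  // Simplified: real parsing also validates the sequence number.
  private long parseClusterTimestamp(String s) {
    String prefix = "application_";
    if (s == null || !s.startsWith(prefix)) {
      throw new IllegalArgumentException("missing " + prefix + " prefix");
    }
    return Long.parseLong(s.substring(prefix.length()).split("_")[0]);
  }
}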

[11/50] [abbrv] hadoop git commit: YARN-4857. Add missing default configuration regarding preemption of CapacityScheduler. Contributed by Kai Sasaki.

2016-04-05 Thread aengineer
YARN-4857. Add missing default configuration regarding preemption of 
CapacityScheduler. Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0064cba1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0064cba1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0064cba1

Branch: refs/heads/HDFS-1312
Commit: 0064cba169d1bb761f6e81ee86830be598d7c500
Parents: f1b8f6b
Author: Varun Vasudev 
Authored: Thu Mar 31 14:05:49 2016 +0530
Committer: Varun Vasudev 
Committed: Thu Mar 31 14:05:49 2016 +0530

--
 .../src/main/resources/yarn-default.xml | 58 
 1 file changed, 58 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0064cba1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index ea1afe4..33cd919 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -908,6 +908,64 @@
 60
   
 
+  <property>
+    <description>
+    If true, run the policy but do not affect the cluster with preemption and kill events.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.observe_only</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>
+    Time in milliseconds between invocations of this ProportionalCapacityPreemptionPolicy
+    policy.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval</name>
+    <value>3000</value>
+  </property>
+
+  <property>
+    <description>
+    Time in milliseconds between requesting a preemption from an application and killing
+    the container.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.max_wait_before_kill</name>
+    <value>15000</value>
+  </property>
+
+  <property>
+    <description>
+    Maximum percentage of resources preempted in a single round. By controlling this value one
+    can throttle the pace at which containers are reclaimed from the cluster. After computing
+    the total desired preemption, the policy scales it back within this limit.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round</name>
+    <value>0.1</value>
+  </property>
+
+  <property>
+    <description>
+    Maximum amount of resources above the target capacity ignored for preemption.
+    This defines a deadzone around the target capacity that helps prevent thrashing and
+    oscillations around the computed target balance. High values would slow the time to capacity
+    and (absent natural.completions) it might prevent convergence to guaranteed capacity.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.max_ignored_over_capacity</name>
+    <value>0.1</value>
+  </property>
+
+  <property>
+    <description>
+    Given a computed preemption target, account for containers naturally expiring and preempt
+    only this percentage of the delta. This determines the rate of geometric convergence into
+    the deadzone (MAX_IGNORED_OVER_CAPACITY). For example, a termination factor of 0.5 will reclaim
+    almost 95% of resources within 5 * #WAIT_TIME_BEFORE_KILL, even absent natural termination.
+    </description>
+    <name>yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor</name>
+    <value>0.2</value>
+  </property>
+
   
 
   
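
These yarn-default.xml entries only document defaults that ProportionalCapacityPreemptionPolicy already applied; the monitor reads them through the ordinary Configuration API. A minimal sketch of picking a few of them up (key strings copied from the entries above; the wrapping class is hypothetical):

import org.apache.hadoop.conf.Configuration;

public class PreemptionSettings {
  public static void main(String[] args) {
    // Assumes yarn-default.xml is on the classpath, as in a YARN process.
    Configuration conf = new Configuration();
    conf.addResource("yarn-default.xml");

    boolean observeOnly = conf.getBoolean(
        "yarn.resourcemanager.monitor.capacity.preemption.observe_only",
        false);
    long monitorIntervalMs = conf.getLong(
        "yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval",
        3000L);
    float perRound = conf.getFloat(
        "yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round",
        0.1f);

    System.out.printf("observeOnly=%s intervalMs=%d perRound=%.2f%n",
        observeOnly, monitorIntervalMs, perRound);
  }
}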



[36/50] [abbrv] hadoop git commit: HDFS-10178. Permanent write failures can happen if pipeline recoveries occur for the first packet. Contributed by Kihwal Lee.

2016-04-05 Thread aengineer
HDFS-10178. Permanent write failures can happen if pipeline recoveries occur 
for the first packet. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7d1fb0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7d1fb0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7d1fb0c

Branch: refs/heads/HDFS-1312
Commit: a7d1fb0cd2fdbf830602eb4dbbd9bbe62f4d5584
Parents: 154d253
Author: Kihwal Lee 
Authored: Mon Apr 4 16:39:23 2016 -0500
Committer: Kihwal Lee 
Committed: Mon Apr 4 16:40:00 2016 -0500

--
 .../hdfs/server/datanode/BlockReceiver.java |  2 +
 .../hdfs/server/datanode/BlockSender.java   |  6 ++-
 .../server/datanode/DataNodeFaultInjector.java  |  2 +
 .../TestClientProtocolForPipelineRecovery.java  | 53 
 4 files changed, 62 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 2e4ee02..fb0c1c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -574,6 +574,8 @@ class BlockReceiver implements Closeable {
 if (mirrorOut != null && !mirrorError) {
   try {
 long begin = Time.monotonicNow();
+// For testing. Normally no-op.
+DataNodeFaultInjector.get().stopSendingPacketDownstream();
 packetReceiver.mirrorPacketTo(mirrorOut);
 mirrorOut.flush();
 long now = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 773a64c..398935d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -300,11 +300,15 @@ class BlockSender implements java.io.Closeable {
 
 // The meta file will contain only the header if the NULL checksum
 // type was used, or if the replica was written to transient 
storage.
+// Also, when only header portion of a data packet was transferred
+// and then pipeline breaks, the meta file can contain only the
+// header and 0 byte in the block data file.
 // Checksum verification is not performed for replicas on transient
 // storage.  The header is important for determining the checksum
 // type later when lazy persistence copies the block to 
non-transient
 // storage and computes the checksum.
-if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) {
+if (!replica.isOnTransientStorage() &&
+metaIn.getLength() >= BlockMetadataHeader.getHeaderSize()) {
   checksumIn = new DataInputStream(new BufferedInputStream(
   metaIn, IO_FILE_BUFFER_SIZE));
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0e38694..7327420 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -50,5 +50,7 @@ public class DataNodeFaultInjector {
 return false;
   }
 
+  public void stopSendingPacketDownstream() throws IOException {}
+
   public void noRegistration() throws IOException { }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7d1fb0c/hadoop-hdfs-project/hadoop-hdfs/s
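
DataNodeFaultInjector is a replaceable singleton: production code calls a no-op hook, and a test swaps in a subclass that pauses or fails at an exact point; here, just before a packet is mirrored downstream, which is how the new test reproduces a pipeline break after only the packet header was sent. A minimal sketch of the pattern (hypothetical FaultInjector and Pipeline classes):

class FaultInjector {
  private static FaultInjector instance = new FaultInjector();

  static FaultInjector get() { return instance; }
  static void set(FaultInjector fi) { instance = fi; } // test-only setter

  void beforeMirror() { /* no-op in production */ }
}

class Pipeline {
  void forwardPacket(byte[] packet) {
    FaultInjector.get().beforeMirror(); // seam for tests
    // ... write the packet to the downstream DataNode ...
  }
}

class PipelineTest {
  void injectFailure() {
    FaultInjector.set(new FaultInjector() {
      @Override
      void beforeMirror() {
        throw new RuntimeException("injected pipeline fault");
      }
    });
  }
}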

hadoop git commit: HDFS-10250. Ozone: Add key Persistence. Contributed by Anu Engineer.

2016-04-05 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 4ac97b181 -> b3044db40


HDFS-10250. Ozone: Add key Persistence. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3044db4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3044db4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3044db4

Branch: refs/heads/HDFS-7240
Commit: b3044db40731ed4d915c9d2452fbd6175c52e10a
Parents: 4ac97b1
Author: Chris Nauroth 
Authored: Tue Apr 5 12:59:45 2016 -0700
Committer: Chris Nauroth 
Committed: Tue Apr 5 12:59:45 2016 -0700

--
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   4 +
 .../container/common/helpers/ChunkUtils.java|   6 +-
 .../container/common/helpers/ContainerData.java |   4 +-
 .../ozone/container/common/helpers/KeyData.java | 160 +
 .../container/common/helpers/KeyUtils.java  |  98 +++
 .../common/impl/ContainerManagerImpl.java   |  22 +++
 .../ozone/container/common/impl/Dispatcher.java | 126 +-
 .../container/common/impl/KeyManagerImpl.java   | 145 
 .../common/interfaces/ContainerManager.java |  26 ++-
 .../container/common/interfaces/KeyManager.java |  63 +++
 .../container/common/utils/ContainerCache.java  | 111 
 .../container/common/utils/LevelDBStore.java|   5 +-
 .../container/ozoneimpl/OzoneContainer.java |   6 +
 .../main/proto/DatanodeContainerProtocol.proto  | 119 ++---
 .../ozone/container/ContainerTestHelper.java|  93 +-
 .../common/impl/TestContainerPersistence.java   | 170 ---
 .../container/ozoneimpl/TestOzoneContainer.java |  23 +++
 17 files changed, 1080 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3044db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 27c79c0..cb2ad22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -54,6 +54,10 @@ public final class OzoneConfigKeys {
   public static final String DFS_OZONE_METADATA_DIRS =
   "dfs.ozone.metadata.dirs";
 
+  public static final String DFS_OZONE_KEY_CACHE = "dfs.ozone.key.cache.size";
+  public static final int DFS_OZONE_KEY_CACHE_DEFAULT = 1024;
+
+
 
   /**
* There is no need to instantiate this class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3044db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
index 03370ac..15e4524 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
@@ -216,7 +216,7 @@ public final class ChunkUtils {
* Reads data from an existing chunk file.
*
* @param chunkFile - file where data lives.
-   * @param data  - chunk defintion.
+   * @param data  - chunk definition.
* @return ByteBuffer
* @throws IOException
* @throws ExecutionException
@@ -284,8 +284,8 @@ public final class ChunkUtils {
byte[] data, ChunkInfo info) {
 Preconditions.checkNotNull(msg);
 
-ContainerProtos.ReadChunkReponseProto.Builder response =
-ContainerProtos.ReadChunkReponseProto.newBuilder();
+ContainerProtos.ReadChunkResponseProto.Builder response =
+ContainerProtos.ReadChunkResponseProto.newBuilder();
 response.setChunkData(info.getProtoBufMessage());
 response.setData(ByteString.copyFrom(data));
 response.setPipeline(msg.getReadChunk().getPipeline());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3044db4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/a
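
At its core the new key layer maps a key name to serialized KeyData within a container, with LevelDBStore providing persistence and ContainerCache keeping a bounded number of DB handles open. A minimal in-memory sketch of the manager's surface (hypothetical SimpleKeyManager; the real code persists through LevelDB rather than a map):

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class SimpleKeyManager {
  private final Map<String, byte[]> store =
      new ConcurrentHashMap<String, byte[]>();

  void putKey(String keyName, byte[] serializedKeyData) {
    store.put(keyName, serializedKeyData); // real code: db.put(key, value)
  }

  byte[] getKey(String keyName) throws IOException {
    byte[] data = store.get(keyName);
    if (data == null) {
      throw new IOException("Unable to find the key: " + keyName);
    }
    return data;
  }

  void deleteKey(String keyName) {
    store.remove(keyName); // real code: db.delete(key)
  }
}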

hadoop git commit: HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by Brahma Reddy Battula.

2016-04-05 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/trunk 500e5a595 -> 0cd320a84


HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cd320a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cd320a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cd320a8

Branch: refs/heads/trunk
Commit: 0cd320a8463efe19a6228f9fe14693aa37ac8a10
Parents: 500e5a5
Author: Ravi Prakash 
Authored: Tue Apr 5 13:41:19 2016 -0700
Committer: Ravi Prakash 
Committed: Tue Apr 5 13:41:19 2016 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd320a8/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 3d9ca42..a9c3304 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -310,7 +310,7 @@
   {#LiveNodes}
   
 {name} ({xferaddr})
-        <td>{#helper_relative_time value="{lastContact}"/}</td>
+        <td>{lastContact}s</td>
 
   
 {capacity|fmt_bytes}



hadoop git commit: HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by Brahma Reddy Battula.

2016-04-05 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e98bb0279 -> 2ca6251a6


HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by 
Brahma Reddy Battula.

(cherry picked from commit 0cd320a8463efe19a6228f9fe14693aa37ac8a10)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ca6251a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ca6251a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ca6251a

Branch: refs/heads/branch-2
Commit: 2ca6251a6ae1acfed1c639e32c6a7e849b4d47a9
Parents: e98bb02
Author: Ravi Prakash 
Authored: Tue Apr 5 13:41:19 2016 -0700
Committer: Ravi Prakash 
Committed: Tue Apr 5 13:43:42 2016 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ca6251a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 3d9ca42..a9c3304 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -310,7 +310,7 @@
   {#LiveNodes}
   
 {name} ({xferaddr})
-        <td>{#helper_relative_time value="{lastContact}"/}</td>
+        <td>{lastContact}s</td>
 
   
 {capacity|fmt_bytes}



hadoop git commit: HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by Brahma Reddy Battula.

2016-04-05 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 deef54b26 -> 7286c435c


HDFS-10235. Last contact for Live Nodes should be relative time. Contributed by 
Brahma Reddy Battula.

(cherry picked from commit 0cd320a8463efe19a6228f9fe14693aa37ac8a10)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7286c435
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7286c435
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7286c435

Branch: refs/heads/branch-2.8
Commit: 7286c435c46fdc049a1a025bee931c3dbb2f8303
Parents: deef54b
Author: Ravi Prakash 
Authored: Tue Apr 5 13:41:19 2016 -0700
Committer: Ravi Prakash 
Committed: Tue Apr 5 13:43:08 2016 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7286c435/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 3d9ca42..a9c3304 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -310,7 +310,7 @@
   {#LiveNodes}
   
 {name} ({xferaddr})
-        <td>{#helper_relative_time value="{lastContact}"/}</td>
+        <td>{lastContact}s</td>
 
   
 {capacity|fmt_bytes}



hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0cd320a84 -> 9ba1e5af0


HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ba1e5af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ba1e5af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ba1e5af

Branch: refs/heads/trunk
Commit: 9ba1e5af06070ba01dcf46e1a4c66713a1d43352
Parents: 0cd320a
Author: Kihwal Lee 
Authored: Tue Apr 5 16:26:18 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 16:26:18 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 46 ++--
 1 file changed, 33 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ba1e5af/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index ed53512..b8fc30d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -39,6 +44,9 @@ public class TestBookKeeperHACheckpoints extends 
TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG = 
LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -49,22 +57,34 @@ public class TestBookKeeperHACheckpoints extends 
TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 
1)));
 
-setNNs();
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
 
-cluster.transitionToActive(0);
+setNNs();
+fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass
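
Instead of the hard-coded ports 10001/10002, the test now derives a random even base port and retries on BindException, the usual idiom for surviving parallel test runs on a shared host. A minimal, runnable sketch of the retry loop with a plain ServerSocket standing in for cluster startup (hypothetical example, not from the patch):

import java.io.IOException;
import java.net.BindException;
import java.net.ServerSocket;
import java.util.Random;

public class BindRetry {
  public static void main(String[] args) throws IOException {
    Random random = new Random();
    ServerSocket socket = null;
    int retries = 0;
    while (socket == null) {
      int basePort = 10060 + random.nextInt(100) * 2; // even base for a pair
      try {
        socket = new ServerSocket(basePort); // stands in for cluster start
      } catch (BindException e) {
        retries++;
        System.out.println("port " + basePort + " busy, retry " + retries);
      }
    }
    System.out.println("bound to port " + socket.getLocalPort());
    socket.close();
  }
}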



hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2ca6251a6 -> 6e37c5fe8


HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.

(cherry picked from commit 9ba1e5af06070ba01dcf46e1a4c66713a1d43352)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e37c5fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e37c5fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e37c5fe

Branch: refs/heads/branch-2
Commit: 6e37c5fe80ee9ce46afc26838391cc27ca6b1e6f
Parents: 2ca6251
Author: Kihwal Lee 
Authored: Tue Apr 5 16:49:59 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 16:49:59 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 47 ++--
 1 file changed, 33 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e37c5fe/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index b74cd7f..3299673 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -35,6 +40,9 @@ public class TestBookKeeperHACheckpoints extends 
TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG = 
LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -45,23 +53,34 @@ public class TestBookKeeperHACheckpoints extends 
TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 
1)));
 
-nn0 = cluster.getNameNode(0);
-nn1 = cluster.getNameNode(1);
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
+nn0 = cluster.getNameNode(0);
+nn1 = cluster.getNameNode(1);
+fs = HATestUtil.configureFailoverFs(cluster, conf);
 
-cluster.transitionToActive(0);
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass



hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7286c435c -> 8bb465564


HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.

(cherry picked from commit 9ba1e5af06070ba01dcf46e1a4c66713a1d43352)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java

(cherry picked from commit 6e37c5fe80ee9ce46afc26838391cc27ca6b1e6f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bb46556
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bb46556
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bb46556

Branch: refs/heads/branch-2.8
Commit: 8bb46556498b9c40ec8d13230ee2e3a7790f572c
Parents: 7286c43
Author: Kihwal Lee 
Authored: Tue Apr 5 16:54:46 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 16:54:46 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 47 ++--
 1 file changed, 33 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb46556/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index b74cd7f..3299673 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -35,6 +40,9 @@ public class TestBookKeeperHACheckpoints extends 
TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG = 
LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -45,23 +53,34 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 
1)));
 
-nn0 = cluster.getNameNode(0);
-nn1 = cluster.getNameNode(1);
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
+nn0 = cluster.getNameNode(0);
+nn1 = cluster.getNameNode(1);
+fs = HATestUtil.configureFailoverFs(cluster, conf);
 
-cluster.transitionToActive(0);
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass
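
The JIRA title speaks of ephemeral ports, which the patch approximates
with random ports plus retry. For comparison, a hedged JDK-only sketch of
obtaining a genuinely ephemeral port by binding to port 0; this is not
what the commit does, and the port can still be taken by another process
between probe and reuse, so a retry loop like the one above remains
advisable.

import java.io.IOException;
import java.net.ServerSocket;

public final class FreePort {
  private FreePort() {
  }

  /** Asks the OS for a currently unused TCP port. */
  public static int pick() throws IOException {
    try (ServerSocket socket = new ServerSocket(0)) {
      socket.setReuseAddress(true);
      // Free at this instant only; callers should tolerate a later
      // BindException when they actually reuse the port.
      return socket.getLocalPort();
    }
  }
}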



hadoop git commit: HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. Contributed by Eric Badger.

2016-04-05 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 76e0bb7a1 -> dd701c980


HDFS-10261. TestBookKeeperHACheckpoints doesn't handle ephemeral HTTP ports. 
Contributed by Eric Badger.

(cherry picked from commit 9ba1e5af06070ba01dcf46e1a4c66713a1d43352)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java

(cherry picked from commit 6e37c5fe80ee9ce46afc26838391cc27ca6b1e6f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd701c98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd701c98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd701c98

Branch: refs/heads/branch-2.7
Commit: dd701c9800ed241f66d4f0777f50affcc21544e7
Parents: 76e0bb7
Author: Kihwal Lee 
Authored: Tue Apr 5 17:00:42 2016 -0500
Committer: Kihwal Lee 
Committed: Tue Apr 5 17:00:42 2016 -0500

--
 .../bkjournal/TestBookKeeperHACheckpoints.java  | 47 ++--
 1 file changed, 33 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd701c98/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index b74cd7f..3299673 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.contrib.bkjournal;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -27,6 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.net.BindException;
+import java.util.Random;
+
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -35,6 +40,9 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
   private static BKJMUtil bkutil = null;
   static int numBookies = 3;
   static int journalCount = 0;
+  private final Random random = new Random();
+
+  private static final Log LOG = LogFactory.getLog(TestStandbyCheckpoints.class);
 
   @SuppressWarnings("rawtypes")
   @Override
@@ -45,23 +53,34 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
  BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
  .toString());
 BKJMUtil.addJournalManagerDefinition(conf);
-MiniDFSNNTopology topology = new MiniDFSNNTopology()
-  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
 
-cluster = new MiniDFSCluster.Builder(conf)
-  .nnTopology(topology)
-  .numDataNodes(1)
-  .manageNameDfsSharedDirs(false)
-  .build();
-cluster.waitActive();
+int retryCount = 0;
+while (true) {
+  try {
+int basePort = 10060 + random.nextInt(100) * 2;
+MiniDFSNNTopology topology = new MiniDFSNNTopology()
+  .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
+.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
+.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 
1)));
 
-nn0 = cluster.getNameNode(0);
-nn1 = cluster.getNameNode(1);
-fs = HATestUtil.configureFailoverFs(cluster, conf);
+cluster = new MiniDFSCluster.Builder(conf)
+  .nnTopology(topology)
+  .numDataNodes(1)
+  .manageNameDfsSharedDirs(false)
+  .build();
+cluster.waitActive();
+nn0 = cluster.getNameNode(0);
+nn1 = cluster.getNameNode(1);
+fs = HATestUtil.configureFailoverFs(cluster, conf);
 
-cluster.transitionToActive(0);
+cluster.transitionToActive(0);
+++retryCount;
+break;
+  } catch (BindException e) {
+LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
++ retryCount + " times");
+  }
+}
   }
 
   @BeforeClass



hadoop git commit: YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode is used to change label of a node. (Sunil G via wangda)

2016-04-05 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9ba1e5af0 -> 21eb42844


YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode 
is used to change label of a node. (Sunil G via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21eb4284
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21eb4284
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21eb4284

Branch: refs/heads/trunk
Commit: 21eb4284487d6f8e4beedb8a0c3168e952f224fc
Parents: 9ba1e5a
Author: Wangda Tan 
Authored: Tue Apr 5 16:24:11 2016 -0700
Committer: Wangda Tan 
Committed: Tue Apr 5 16:24:11 2016 -0700

--
 .../scheduler/capacity/AbstractCSQueue.java |  6 +++
 .../scheduler/capacity/CSQueueUtils.java|  2 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 40 +++-
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21eb4284/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 6e715fb..c7d6d02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -591,6 +591,9 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
 // ResourceUsage has its own lock, no addition lock needs here.
 queueUsage.incUsed(nodeLabel, resourceToInc);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+minimumAllocation, queueUsage, queueCapacities, nodeLabel);
 if (null != parent) {
   parent.incUsedResource(nodeLabel, resourceToInc, null);
 }
@@ -604,6 +607,9 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
 // ResourceUsage has its own lock, no addition lock needs here.
 queueUsage.decUsed(nodeLabel, resourceToDec);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+minimumAllocation, queueUsage, queueCapacities, nodeLabel);
 if (null != parent) {
   parent.decUsedResource(nodeLabel, resourceToDec, null);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21eb4284/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
index 9cdcb72..0166d83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
@@ -180,7 +180,7 @@ class CSQueueUtils {
* Update partitioned resource usage, if nodePartition == null, will update
* used resource for all partitions of this queue.
*/
-  private static void updateUsedCapacity(final ResourceCalculator rc,
+  public static void updateUsedCapacity(final ResourceCalculator rc,
   final Resource totalPartitionResource, final Resource minimumAllocation,
   ResourceUsage queueResourceUsage, QueueCapacities queueCapacities,
   String nodePartition) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21eb4284/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop
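
For context on the change itself: incUsedResource and decUsedResource
adjust the queue's absolute used resources, but the used-capacity ratio
that the scheduler UI and the REST endpoints render is a derived value,
and before this patch it was not recomputed on those paths, so the two
views could disagree after -replaceLabelsOnNode. The fix recomputes it via
CSQueueUtils.updateUsedCapacity (made public above) on every inc and dec.
A simplified, hedged illustration of that invariant follows; the class and
field names are hypothetical, not YARN's.

public class QueueUsage {
  private final long partitionTotalMb;
  private long usedMb;
  private float usedCapacity; // derived value read by UI/REST-style consumers

  public QueueUsage(long partitionTotalMb) {
    this.partitionTotalMb = partitionTotalMb;
  }

  public synchronized void incUsed(long mb) {
    usedMb += mb;
    updateUsedCapacity(); // recompute on every change, as the patch does
  }

  public synchronized void decUsed(long mb) {
    usedMb -= mb;
    updateUsedCapacity();
  }

  private void updateUsedCapacity() {
    usedCapacity = partitionTotalMb == 0
        ? 0f : (float) usedMb / partitionTotalMb;
  }

  public synchronized float getUsedCapacity() {
    return usedCapacity;
  }
}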

hadoop git commit: YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode is used to change label of a node. (Sunil G via wangda)

2016-04-05 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6e37c5fe8 -> 11e796b5c


YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode 
is used to change label of a node. (Sunil G via wangda)

(cherry picked from commit 21eb4284487d6f8e4beedb8a0c3168e952f224fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11e796b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11e796b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11e796b5

Branch: refs/heads/branch-2
Commit: 11e796b5cdff39d1f4fcd6109c4c2701426947af
Parents: 6e37c5f
Author: Wangda Tan 
Authored: Tue Apr 5 16:24:11 2016 -0700
Committer: Wangda Tan 
Committed: Tue Apr 5 16:25:55 2016 -0700

--
 .../scheduler/capacity/AbstractCSQueue.java |  6 +++
 .../scheduler/capacity/CSQueueUtils.java|  2 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 40 +++-
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11e796b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 6e715fb..c7d6d02 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -591,6 +591,9 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
 // ResourceUsage has its own lock, no addition lock needs here.
 queueUsage.incUsed(nodeLabel, resourceToInc);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+minimumAllocation, queueUsage, queueCapacities, nodeLabel);
 if (null != parent) {
   parent.incUsedResource(nodeLabel, resourceToInc, null);
 }
@@ -604,6 +607,9 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
 // ResourceUsage has its own lock, no addition lock needs here.
 queueUsage.decUsed(nodeLabel, resourceToDec);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+minimumAllocation, queueUsage, queueCapacities, nodeLabel);
 if (null != parent) {
   parent.decUsedResource(nodeLabel, resourceToDec, null);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11e796b5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
index 9cdcb72..0166d83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
@@ -180,7 +180,7 @@ class CSQueueUtils {
* Update partitioned resource usage, if nodePartition == null, will update
* used resource for all partitions of this queue.
*/
-  private static void updateUsedCapacity(final ResourceCalculator rc,
+  public static void updateUsedCapacity(final ResourceCalculator rc,
   final Resource totalPartitionResource, final Resource minimumAllocation,
   ResourceUsage queueResourceUsage, QueueCapacities queueCapacities,
   String nodePartition) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11e796b5/hadoop-yarn-project/hadoop-yarn/hadoop-ya

hadoop git commit: YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode is used to change label of a node. (Sunil G via wangda)

2016-04-05 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8bb465564 -> b7c83d488


YARN-4699. Scheduler UI and REST o/p is not in sync when -replaceLabelsOnNode 
is used to change label of a node. (Sunil G via wangda)

(cherry picked from commit 21eb4284487d6f8e4beedb8a0c3168e952f224fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b7c83d48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b7c83d48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b7c83d48

Branch: refs/heads/branch-2.8
Commit: b7c83d488332236964a905a50afb65bf5ca70aeb
Parents: 8bb4655
Author: Wangda Tan 
Authored: Tue Apr 5 16:24:11 2016 -0700
Committer: Wangda Tan 
Committed: Tue Apr 5 16:30:56 2016 -0700

--
 .../scheduler/capacity/AbstractCSQueue.java |  6 +++
 .../scheduler/capacity/CSQueueUtils.java|  2 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 40 +++-
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7c83d48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 238aa12..549fa34 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -558,6 +558,9 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
 // ResourceUsage has its own lock, no addition lock needs here.
 queueUsage.incUsed(nodeLabel, resourceToInc);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+minimumAllocation, queueUsage, queueCapacities, nodeLabel);
 if (null != parent) {
   parent.incUsedResource(nodeLabel, resourceToInc, null);
 }
@@ -571,6 +574,9 @@ public abstract class AbstractCSQueue implements CSQueue {
 }
 // ResourceUsage has its own lock, no addition lock needs here.
 queueUsage.decUsed(nodeLabel, resourceToDec);
+CSQueueUtils.updateUsedCapacity(resourceCalculator,
+labelManager.getResourceByLabel(nodeLabel, Resources.none()),
+minimumAllocation, queueUsage, queueCapacities, nodeLabel);
 if (null != parent) {
   parent.decUsedResource(nodeLabel, resourceToDec, null);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7c83d48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
index 2f981a7..c402784 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueUtils.java
@@ -180,7 +180,7 @@ class CSQueueUtils {
* Update partitioned resource usage, if nodePartition == null, will update
* used resource for all partitions of this queue.
*/
-  private static void updateUsedCapacity(final ResourceCalculator rc,
+  public static void updateUsedCapacity(final ResourceCalculator rc,
   final Resource totalPartitionResource, final Resource minimumAllocation,
   ResourceUsage queueResourceUsage, QueueCapacities queueCapacities,
   String nodePartition) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b7c83d48/hadoop-yarn-project/hadoop-yarn/hadoo

[1/2] hadoop git commit: HADOOP-7817. RawLocalFileSystem.append() should give FSDataOutputStream with accurate .getPos() (Contributed by kanaka kumar avvaru)

2016-04-05 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 95b870096 -> 783c99d29
  refs/heads/branch-2.7 dd701c980 -> 62da8f6fa


HADOOP-7817. RawLocalFileSystem.append() should give FSDataOutputStream with 
accurate .getPos() (Contributed by kanaka kumar avvaru)

(cherry picked from commit 48ca23def1d1c28448a65238814070e79c8f4c4e)
(cherry picked from commit d21bc811d82f685b0a1338bc513d9a925d305a17)

 Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62da8f6f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62da8f6f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62da8f6f

Branch: refs/heads/branch-2.7
Commit: 62da8f6fa688b503ed124313bc2f6f664175d225
Parents: dd701c9
Author: Vinayakumar B 
Authored: Wed Jun 10 11:05:58 2015 +0530
Committer: Vinayakumar B 
Committed: Wed Apr 6 10:49:34 2016 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java|  6 ++--
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 34 
 3 files changed, 41 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62da8f6f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index cf78f60..24ff1c0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -918,6 +918,9 @@ Release 2.6.5 - UNRELEASED
 HADOOP-12589. Fix intermittent test failure of TestCopyPreserveFlag
 (iwasakims)
 
+HADOOP-7817. RawLocalFileSystem.append() should give FSDataOutputStream
+with accurate .getPos() (kanaka kumar avvaru via vinayakumarb)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62da8f6f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 8dccf04..b1061ba 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -265,11 +265,13 @@ public class RawLocalFileSystem extends FileSystem {
 if (!exists(f)) {
   throw new FileNotFoundException("File " + f + " not found");
 }
-if (getFileStatus(f).isDirectory()) {
+FileStatus status = getFileStatus(f);
+if (status.isDirectory()) {
   throw new IOException("Cannot append to a diretory (=" + f + " )");
 }
 return new FSDataOutputStream(new BufferedOutputStream(
-createOutputStreamWithMode(f, true, null), bufferSize), statistics);
+createOutputStreamWithMode(f, true, null), bufferSize), statistics,
+status.getLen());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62da8f6f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index ca78a8a..8946734 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -561,6 +561,40 @@ public class TestLocalFileSystem {
   }
 
   @Test
+  public void testAppendSetsPosCorrectly() throws Exception {
+FileSystem fs = fileSys.getRawFileSystem();
+Path file = new Path(TEST_ROOT_DIR, "test-append");
+
+fs.delete(file, true);
+FSDataOutputStream out = fs.create(file);
+
+try {
+  out.write("text1".getBytes());
+} finally {
+  out.close();
+}
+
+// Verify the position
+out = fs.append(file);
+try {
+  assertEquals(5, out.getPos());
+  out.write("text2".getBytes());
+} finally {
+  out.close();
+}
+
+// Verify the content
+FSDataInputStream in = fs.open(file);
+try {
+  byte[] buf = new byte[in.available()];
+  in.readFully(buf);
+  assertEquals("text1text2", 
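
The change itself passes the existing file length into the
FSDataOutputStream constructor so that a stream returned by
RawLocalFileSystem.append() reports its position from the end of the file
rather than from zero. A small, hedged usage sketch against the public
FileSystem API; the temp path and class name are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendPosCheck {
  public static void main(String[] args) throws Exception {
    // Raw local filesystem, mirroring what the new test exercises.
    FileSystem fs =
        FileSystem.getLocal(new Configuration()).getRawFileSystem();
    Path file =
        new Path(System.getProperty("java.io.tmpdir"), "append-pos-check");

    fs.delete(file, true);
    try (FSDataOutputStream out = fs.create(file)) {
      out.write("text1".getBytes("UTF-8"));
    }

    try (FSDataOutputStream out = fs.append(file)) {
      // With this fix applied the stream resumes at the current length (5);
      // without it, getPos() started at 0 despite appending.
      System.out.println("pos after append() = " + out.getPos());
      out.write("text2".getBytes("UTF-8"));
    }
  }
}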

[2/2] hadoop git commit: HADOOP-7817. RawLocalFileSystem.append() should give FSDataOutputStream with accurate .getPos() (Contributed by kanaka kumar avvaru)

2016-04-05 Thread vinayakumarb
HADOOP-7817. RawLocalFileSystem.append() should give FSDataOutputStream with 
accurate .getPos() (Contributed by kanaka kumar avvaru)

(cherry picked from commit 48ca23def1d1c28448a65238814070e79c8f4c4e)
(cherry picked from commit d21bc811d82f685b0a1338bc513d9a925d305a17)

(cherry picked from commit 62da8f6fa688b503ed124313bc2f6f664175d225)

 Conflicts:
hadoop-common-project/hadoop-common/CHANGES.txt

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/783c99d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/783c99d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/783c99d2

Branch: refs/heads/branch-2.6
Commit: 783c99d292e2b0984a05bdb3fdb0100abdfc2ef9
Parents: 95b8700
Author: Vinayakumar B 
Authored: Wed Jun 10 11:05:58 2015 +0530
Committer: Vinayakumar B 
Committed: Wed Apr 6 10:52:28 2016 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java|  6 ++--
 .../apache/hadoop/fs/TestLocalFileSystem.java   | 34 
 3 files changed, 41 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/783c99d2/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 39f14ca..e651711 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -30,6 +30,9 @@ Release 2.6.5 - UNRELEASED
 HADOOP-12958. PhantomReference for filesystem statistics can trigger OOM
 (Sangjin Lee via jlowe)
 
+HADOOP-7817. RawLocalFileSystem.append() should give FSDataOutputStream
+with accurate .getPos() (kanaka kumar avvaru via vinayakumarb)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/783c99d2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 2a25da6..a0680bb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -243,11 +243,13 @@ public class RawLocalFileSystem extends FileSystem {
 if (!exists(f)) {
   throw new FileNotFoundException("File " + f + " not found");
 }
-if (getFileStatus(f).isDirectory()) {
+FileStatus status = getFileStatus(f);
+if (status.isDirectory()) {
   throw new IOException("Cannot append to a diretory (=" + f + " )");
 }
 return new FSDataOutputStream(new BufferedOutputStream(
-new LocalFSFileOutputStream(f, true), bufferSize), statistics);
+new LocalFSFileOutputStream(f, true), bufferSize), statistics,
+status.getLen());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/783c99d2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index ca78a8a..8946734 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -561,6 +561,40 @@ public class TestLocalFileSystem {
   }
 
   @Test
+  public void testAppendSetsPosCorrectly() throws Exception {
+FileSystem fs = fileSys.getRawFileSystem();
+Path file = new Path(TEST_ROOT_DIR, "test-append");
+
+fs.delete(file, true);
+FSDataOutputStream out = fs.create(file);
+
+try {
+  out.write("text1".getBytes());
+} finally {
+  out.close();
+}
+
+// Verify the position
+out = fs.append(file);
+try {
+  assertEquals(5, out.getPos());
+  out.write("text2".getBytes());
+} finally {
+  out.close();
+}
+
+// Verify the content
+FSDataInputStream in = fs.open(file);
+try {
+  byte[] buf = new byte[in.available()];
+  in.readFully(buf);
+  assertEquals("text1text2", new String(buf));
+} finally {
+  in.close();
+