hadoop git commit: YARN-5377. Fix TestQueuingContainerManager.testKillMultipleOpportunisticContainers. (Konstantinos Karanasos via asuresh)

2016-11-07 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fff15858 -> f38a6d03a


YARN-5377. Fix 
TestQueuingContainerManager.testKillMultipleOpportunisticContainers. 
(Konstantinos Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f38a6d03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f38a6d03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f38a6d03

Branch: refs/heads/trunk
Commit: f38a6d03a11ca6de93a225563ddf55ec99d5063c
Parents: 3fff158
Author: Arun Suresh 
Authored: Mon Nov 7 22:10:03 2016 -0800
Committer: Arun Suresh 
Committed: Mon Nov 7 22:10:03 2016 -0800

--
 .../BaseContainerManagerTest.java   | 38 +---
 .../queuing/TestQueuingContainerManager.java|  6 ++--
 2 files changed, 29 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f38a6d03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index d359c3d..6dd1ac7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -284,15 +284,17 @@ public abstract class BaseContainerManagerTest {
 .build());
   }
 
-  public static void waitForContainerState(ContainerManagementProtocol 
containerManager,
-  ContainerId containerID, ContainerState finalState)
+  public static void waitForContainerState(
+  ContainerManagementProtocol containerManager, ContainerId containerID,
+  ContainerState finalState)
   throws InterruptedException, YarnException, IOException {
 waitForContainerState(containerManager, containerID, finalState, 20);
   }
 
-  public static void waitForContainerState(ContainerManagementProtocol 
containerManager,
-  ContainerId containerID, ContainerState finalState, int timeOutMax)
-  throws InterruptedException, YarnException, IOException {
+  public static void waitForContainerState(
+  ContainerManagementProtocol containerManager, ContainerId containerID,
+  ContainerState finalState, int timeOutMax)
+  throws InterruptedException, YarnException, IOException {
 List<ContainerId> list = new ArrayList<>();
 list.add(containerID);
 GetContainerStatusesRequest request =
@@ -314,8 +316,9 @@ public abstract class BaseContainerManagerTest {
   finalState, containerStatus.getState());
   }
 
-  static void waitForApplicationState(ContainerManagerImpl containerManager,
-  ApplicationId appID, ApplicationState finalState)
+  public static void waitForApplicationState(
+  ContainerManagerImpl containerManager, ApplicationId appID,
+  ApplicationState finalState)
   throws InterruptedException {
 // Wait for app-finish
 Application app =
@@ -344,7 +347,16 @@ public abstract class BaseContainerManagerTest {
   public static void waitForNMContainerState(ContainerManagerImpl
   containerManager, ContainerId containerID,
   org.apache.hadoop.yarn.server.nodemanager.containermanager
-  .container.ContainerState finalState, int timeOutMax)
+  .container.ContainerState finalState, int timeOutMax)
+  throws InterruptedException, YarnException, IOException {
+waitForNMContainerState(containerManager, containerID,
+Arrays.asList(finalState), timeOutMax);
+  }
+
+  public static void waitForNMContainerState(ContainerManagerImpl
+  containerManager, ContainerId containerID,
+  List<org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState> finalStates, int timeOutMax)
   throws InterruptedException, YarnException, IOException {
 Container container = null;
 org.apache.hadoop.yarn.server.nodemanager
@@ -358,15 +370,15 @@ public abstract class BaseContainerManagerTest {
 currentState = container.getContainerState();
   }
   if (currentState != null) {
-LOG.info("Waiting for NM container to get into state " + finalState
-+ ". Current state is " + 

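As a hedged aside (not part of the patch): a sketch of how a test could use the new list-based waitForNMContainerState overload to wait for a container to reach any one of several NM-side states. The chosen states, the 20-iteration timeout, and the helper class are illustrative assumptions.

import java.util.Arrays;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;

class MultiStateWaitSketch {
  // Wait until the NM reports the container as either cleaned up after a kill
  // or fully DONE, whichever it reaches first.
  static void waitForKillOrDone(ContainerManagerImpl cm, ContainerId id)
      throws Exception {
    BaseContainerManagerTest.waitForNMContainerState(cm, id,
        Arrays.asList(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
            ContainerState.DONE),
        20);
  }
}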
hadoop git commit: YARN-5720. Update document for "rmadmin -replaceLabelOnNode". Contributed by Tao Jie

2016-11-07 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 3603e525b -> 5f386967a


YARN-5720. Update document for "rmadmin -replaceLabelOnNode". Contributed by Tao Jie

(cherry picked from commit 0e75496049001b00c00901fcbfe3d4229b2fd64a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f386967
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f386967
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f386967

Branch: refs/heads/branch-2.8
Commit: 5f386967a19e5181eab741030144bfdafe8a8532
Parents: 3603e52
Author: Naganarasimha 
Authored: Thu Nov 3 12:32:22 2016 +0530
Committer: Naganarasimha 
Committed: Tue Nov 8 10:37:34 2016 +0530

--
 .../hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md  | 2 +-
 .../hadoop-yarn-site/src/site/markdown/YarnCommands.md   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f386967/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
index af75bfe..a87658d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeLabel.md
@@ -89,7 +89,7 @@ Notes:
 ###Add/modify node-to-labels mapping to YARN
 
 * Configuring nodes to labels mapping in **Centralized** NodeLabel setup
-* Executing ```yarn rmadmin -replaceLabelsOnNode “node1[:port]=label1 
node2=label2”```. Added label1 to node1, label2 to node2. If user don’t 
specify port, it added the label to all ```NodeManagers``` running on the node.
+* Executing ```yarn rmadmin -replaceLabelsOnNode “node1[:port]=label1 
node2=label2” [-failOnUnknownNodes]```. Added label1 to node1, label2 to 
node2. If user don’t specify port, it adds the label to all 
```NodeManagers``` running on the node. If option ```-failOnUnknownNodes``` is 
set, this command will fail if specified nodes are unknown.
 
 * Configuring nodes to labels mapping in **Distributed** NodeLabel setup
 
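As a hedged aside on the -replaceLabelsOnNode usage documented above (not part of this patch): the same replacement can be driven from Java through the rmadmin Tool. The node name, port, and label are made-up values, and the use of RMAdminCLI with ToolRunner is an assumption about the public client API rather than something this commit changes.

import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ReplaceLabelsSketch {
  public static void main(String[] args) throws Exception {
    // Equivalent to: yarn rmadmin -replaceLabelsOnNode "node1:8041=label1" -failOnUnknownNodes
    int rc = ToolRunner.run(new YarnConfiguration(), new RMAdminCLI(),
        new String[] {"-replaceLabelsOnNode", "node1:8041=label1",
            "-failOnUnknownNodes"});
    System.exit(rc);
  }
}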

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f386967/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index 383c4cd..2670f09 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -221,7 +221,7 @@ Usage:
  -getGroups [username]
  -addToClusterNodeLabels 
<"label1(exclusive=true),label2(exclusive=false),label3">
  -removeFromClusterNodeLabels  (label splitted by 
",")
- -replaceLabelsOnNode <"node1[:port]=label1,label2 
node2[:port]=label1,label2">
+ -replaceLabelsOnNode <"node1[:port]=label1,label2 
node2[:port]=label1,label2"> [-failOnUnknownNodes]
  -directlyAccessNodeLabelStore
  -refreshClusterMaxPriority
  -updateNodeResource [NodeID] [MemSize] [vCores] ([OvercommitTimeout])
@@ -245,7 +245,7 @@ Usage:
 | -getGroups [username] | Get groups the specified user belongs to. |
 | -addToClusterNodeLabels 
<"label1(exclusive=true),label2(exclusive=false),label3"> | Add to cluster node 
labels. Default exclusivity is true. |
 | -removeFromClusterNodeLabels  (label splitted by ",") 
| Remove from cluster node labels. |
-| -replaceLabelsOnNode <"node1[:port]=label1,label2 
node2[:port]=label1,label2"> | Replace labels on nodes (please note that we do 
not support specifying multiple labels on a single host for now.) |
+| -replaceLabelsOnNode <"node1[:port]=label1,label2 
node2[:port]=label1,label2"> [-failOnUnknownNodes]| Replace labels on nodes 
(please note that we do not support specifying multiple labels on a single host 
for now.) -failOnUnknownNodes is optional, when we set this option, it will 
fail if specified nodes are unknown.|
 | -directlyAccessNodeLabelStore | This is DEPRECATED, will be removed in 
future releases. Directly access node label store, with this option, all node 
label related operations will not connect RM. Instead, they will access/modify 
stored node labels directly. By default, it is false (access via RM). AND 
PLEASE NOTE: if you configured yarn.node-labels.fs-store.root-dir to a local 

hadoop git commit: YARN-4884. Fix missing documentation about rmadmin command regarding node labels. Contributed by Kai Sasaki.

2016-11-07 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 384b7b71a -> 3603e525b


YARN-4884. Fix missing documentation about rmadmin command regarding node 
labels. Contributed by Kai Sasaki.

(cherry picked from commit f1b8f6b2c16403869f78a54268ae1165982a7050)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3603e525
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3603e525
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3603e525

Branch: refs/heads/branch-2.8
Commit: 3603e525b187bde6fe4f3267d1439343f3f11554
Parents: 384b7b7
Author: Varun Vasudev 
Authored: Thu Mar 31 14:01:48 2016 +0530
Committer: Naganarasimha 
Committed: Tue Nov 8 10:22:59 2016 +0530

--
 .../src/site/markdown/YarnCommands.md   | 45 +---
 1 file changed, 30 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3603e525/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index 7939247..383c4cd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -210,31 +210,46 @@ Start the ResourceManager
 Usage:
 
 ```
-  yarn rmadmin [-refreshQueues]
-   [-refreshNodes]
-   [-refreshUserToGroupsMapping] 
-   [-refreshSuperUserGroupsConfiguration]
-   [-refreshAdminAcls] 
-   [-refreshServiceAcl]
-   [-getGroups [username]]
-   [-transitionToActive [--forceactive] [--forcemanual] 
]
-   [-transitionToStandby [--forcemanual] ]
-   [-failover [--forcefence] [--forceactive]  
]
-   [-getServiceState ]
-   [-checkHealth ]
-   [-help [cmd]]
+  Usage: yarn rmadmin
+ -refreshQueues
+ -refreshNodes [-g [timeout in seconds]]
+ -refreshNodesResources
+ -refreshSuperUserGroupsConfiguration
+ -refreshUserToGroupsMappings
+ -refreshAdminAcls
+ -refreshServiceAcl
+ -getGroups [username]
+ -addToClusterNodeLabels 
<"label1(exclusive=true),label2(exclusive=false),label3">
+ -removeFromClusterNodeLabels  (label splitted by 
",")
+ -replaceLabelsOnNode <"node1[:port]=label1,label2 
node2[:port]=label1,label2">
+ -directlyAccessNodeLabelStore
+ -refreshClusterMaxPriority
+ -updateNodeResource [NodeID] [MemSize] [vCores] ([OvercommitTimeout])
+ -transitionToActive [--forceactive] 
+ -transitionToStandby 
+ -failover [--forcefence] [--forceactive]  
+ -getServiceState 
+ -checkHealth 
+ -help [cmd]
 ```
 
 | COMMAND\_OPTIONS | Description |
 |: |: |
 | -refreshQueues | Reload the queues' acls, states and scheduler specific 
properties. ResourceManager will reload the mapred-queues configuration file. |
 | -refreshNodes | Refresh the hosts information at the ResourceManager. |
-| -refreshUserToGroupsMappings | Refresh user-to-groups mappings. |
+| -refreshNodesResources | Refresh resources of NodeManagers at the 
ResourceManager. |
 | -refreshSuperUserGroupsConfiguration | Refresh superuser proxy groups 
mappings. |
+| -refreshUserToGroupsMappings | Refresh user-to-groups mappings. |
 | -refreshAdminAcls | Refresh acls for administration of ResourceManager |
 | -refreshServiceAcl | Reload the service-level authorization policy file 
ResourceManager will reload the authorization policy file. |
 | -getGroups [username] | Get groups the specified user belongs to. |
-| -transitionToActive [--forceactive] [--forcemanual] \ | 
Transitions the service into Active state. Try to make the target active 
without checking that there is no active node if the --forceactive option is 
used. This command can not be used if automatic failover is enabled. Though you 
can override this by --forcemanual option, you need caution. |
+| -addToClusterNodeLabels 
<"label1(exclusive=true),label2(exclusive=false),label3"> | Add to cluster node 
labels. Default exclusivity is true. |
+| -removeFromClusterNodeLabels  (label splitted by ",") 
| Remove from cluster node labels. |
+| -replaceLabelsOnNode <"node1[:port]=label1,label2 
node2[:port]=label1,label2"> | Replace labels on nodes (please note that we do 
not support specifying multiple labels on a single host for now.) |
+| -directlyAccessNodeLabelStore | This is DEPRECATED, will be removed in 
future releases. 

[hadoop] Git Push Summary

2016-11-07 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/yarn-5783 [deleted] 846dfa5f8




hadoop git commit: YARN-5783. Starvation tests. Patch v7

2016-11-07 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/yarn-5783 [created] 846dfa5f8


YARN-5783. Starvation tests. Patch v7


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/846dfa5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/846dfa5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/846dfa5f

Branch: refs/heads/yarn-5783
Commit: 846dfa5f80398de9cbc64bd4c17bd7b404d3167f
Parents: b425ca2
Author: Karthik Kambatla 
Authored: Mon Nov 7 19:41:15 2016 -0800
Committer: Karthik Kambatla 
Committed: Mon Nov 7 19:41:15 2016 -0800

--
 .../impl/pb/ApplicationAttemptIdPBImpl.java |  34 +++
 .../scheduler/SchedulerApplicationAttempt.java  |  16 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java |  16 ++
 .../scheduler/fair/FSAppAttempt.java|  16 ++
 .../scheduler/fair/FSPreemptionThread.java  |   2 +-
 .../scheduler/fair/FSStarvedApps.java   |  56 +++--
 .../scheduler/fair/FairScheduler.java   |   7 +-
 .../fair/FairSchedulerWithMockPreemption.java   |  58 +
 .../scheduler/fair/TestFSAppStarvation.java | 245 +++
 9 files changed, 425 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/846dfa5f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
index 521d9cc..9cb65ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
@@ -86,4 +86,38 @@ public class ApplicationAttemptIdPBImpl extends 
ApplicationAttemptId {
 proto = builder.build();
 builder = null;
   }
+
+  @Override
+  public boolean equals(Object o) {
+Preconditions.checkArgument(proto != null || builder != null,
+"One of proto and builder should be not null.");
+
+if (proto != null) {
+  return proto.equals(o);
+}
+
+if (builder != null) {
+  return builder.equals(o);
+}
+
+// Preconditions ensures we don't get here
+return false;
+  }
+
+  @Override
+  public int hashCode() {
+Preconditions.checkArgument(proto != null || builder != null,
+"One of proto and builder should be not null.");
+
+if (proto != null) {
+  return proto.hashCode();
+}
+
+if (builder != null) {
+  return builder.hashCode();
+}
+
+// Preconditions ensures we don't get here
+return 0;
+  }
 }  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/846dfa5f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index d148132..35c92ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -1206,6 +1206,22 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 this.isAttemptRecovering = isRecovering;
   }
 
+  @Override
+  public int hashCode() {
+return getApplicationAttemptId().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (! (o instanceof SchedulerApplicationAttempt)) {
+  return false;
+}
+
+SchedulerApplicationAttempt other = (SchedulerApplicationAttempt) o;
+return (this == other ||
+
this.getApplicationAttemptId().equals(other.getApplicationAttemptId()));
+  }
+
   /**
* Different state for Application Master, user can see this 

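A brief hedged illustration (not from the patch) of what keying equals()/hashCode() on the ApplicationAttemptId buys: the same attempt offered repeatedly to a hash-based collection is tracked only once, which is presumably what starvation-tracking structures such as the FSStarvedApps touched by this patch rely on. The helper below and its use of a Set are illustrative assumptions.

import java.util.Set;

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;

class AttemptDedupSketch {
  // Returns false on the second add: with the new equals()/hashCode(), the
  // attempt is identified by its ApplicationAttemptId, so duplicates collapse.
  static boolean addTwice(Set<SchedulerApplicationAttempt> starvedApps,
      SchedulerApplicationAttempt attempt) {
    starvedApps.add(attempt);
    return starvedApps.add(attempt);
  }
}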
[1/2] hadoop git commit: HDFS-11114. Support for running async disk checks in DataNode.

2016-11-07 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b245e9ce2 -> 732eaaddd
  refs/heads/trunk 3dbad5d82 -> 3fff15858


HDFS-11114. Support for running async disk checks in DataNode.

This closes #153.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fff1585
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fff1585
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fff1585

Branch: refs/heads/trunk
Commit: 3fff1585875ad322ce6e8acb485275e6a4360823
Parents: 3dbad5d
Author: Arpit Agarwal 
Authored: Mon Nov 7 18:45:53 2016 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 7 18:45:53 2016 -0800

--
 .../server/datanode/checker/AsyncChecker.java   |  63 +
 .../hdfs/server/datanode/checker/Checkable.java |  49 
 .../datanode/checker/ThrottledAsyncChecker.java | 224 +++
 .../server/datanode/checker/package-info.java   |  26 ++
 .../checker/TestThrottledAsyncChecker.java  | 276 +++
 5 files changed, 638 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fff1585/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
new file mode 100644
index 000..1d534a3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode.checker;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A class that can be used to schedule an asynchronous check on a given
+ * {@link Checkable}. If the check is successfully scheduled then a
+ * {@link ListenableFuture} is returned.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface AsyncChecker<K, V> {
+
+  /**
+   * Schedule an asynchronous check for the given object.
+   *
+   * @param target object to be checked.
+   *
+   * @param context the interpretation of the context depends on the
+   *target.
+   *
+   * @return returns a {@link ListenableFuture} that can be used to
+   * retrieve the result of the asynchronous check.
+   */
+  ListenableFuture<V> schedule(Checkable<K, V> target, K context);
+
+  /**
+   * Cancel all executing checks and wait for them to complete.
+   * First attempts a graceful cancellation, then cancels forcefully.
+   * Waits for the supplied timeout after both attempts.
+   *
+   * See {@link ExecutorService#awaitTermination} for a description of
+   * the parameters.
+   *
+   * @throws InterruptedException
+   */
+  void shutdownAndWait(long timeout, TimeUnit timeUnit)
+  throws InterruptedException;
+}

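A hedged usage sketch, not part of the patch: a toy Checkable plus a helper that schedules one check through an AsyncChecker supplied by the caller (for example the ThrottledAsyncChecker added in this commit). The generic parameters and the DiskCheckable name are illustrative assumptions, and the classes are assumed to live in the same package as AsyncChecker and Checkable.

import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ListenableFuture;

class DiskCheckable implements Checkable<Boolean, String> {
  @Override
  public String check(Boolean verbose) throws Exception {
    // A real implementation would probe the volume here.
    return verbose ? "HEALTHY (verbose)" : "HEALTHY";
  }
}

class AsyncCheckerUsageSketch {
  // Schedule a single check, block for its result, then shut the checker down.
  static String runOnce(AsyncChecker<Boolean, String> checker) throws Exception {
    ListenableFuture<String> result =
        checker.schedule(new DiskCheckable(), Boolean.TRUE);
    try {
      return result.get();
    } finally {
      checker.shutdownAndWait(1, TimeUnit.SECONDS);
    }
  }
}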
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fff1585/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
new file mode 100644
index 000..833ebda
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license 

[2/2] hadoop git commit: HDFS-11114. Support for running async disk checks in DataNode.

2016-11-07 Thread arp
HDFS-11114. Support for running async disk checks in DataNode.

This closes #153.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/732eaadd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/732eaadd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/732eaadd

Branch: refs/heads/branch-2
Commit: 732eaadddbe9a2682bf96ba06ce5e7ebfa58ba87
Parents: b245e9c
Author: Arpit Agarwal 
Authored: Mon Nov 7 18:45:53 2016 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 7 18:46:37 2016 -0800

--
 .../server/datanode/checker/AsyncChecker.java   |  63 +
 .../hdfs/server/datanode/checker/Checkable.java |  49 
 .../datanode/checker/ThrottledAsyncChecker.java | 224 +++
 .../server/datanode/checker/package-info.java   |  26 ++
 .../checker/TestThrottledAsyncChecker.java  | 276 +++
 5 files changed, 638 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/732eaadd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
new file mode 100644
index 000..1d534a3
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode.checker;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A class that can be used to schedule an asynchronous check on a given
+ * {@link Checkable}. If the check is successfully scheduled then a
+ * {@link ListenableFuture} is returned.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface AsyncChecker<K, V> {
+
+  /**
+   * Schedule an asynchronous check for the given object.
+   *
+   * @param target object to be checked.
+   *
+   * @param context the interpretation of the context depends on the
+   *target.
+   *
+   * @return returns a {@link ListenableFuture} that can be used to
+   * retrieve the result of the asynchronous check.
+   */
+  ListenableFuture<V> schedule(Checkable<K, V> target, K context);
+
+  /**
+   * Cancel all executing checks and wait for them to complete.
+   * First attempts a graceful cancellation, then cancels forcefully.
+   * Waits for the supplied timeout after both attempts.
+   *
+   * See {@link ExecutorService#awaitTermination} for a description of
+   * the parameters.
+   *
+   * @throws InterruptedException
+   */
+  void shutdownAndWait(long timeout, TimeUnit timeUnit)
+  throws InterruptedException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/732eaadd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
new file mode 100644
index 000..833ebda
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/Checkable.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright 

hadoop git commit: HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. Contributed by Erik Krogen.

2016-11-07 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c1a6f4f2e -> 64657cd67


HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. 
Contributed by Erik Krogen.

(cherry picked from commit 3dbad5d823b8bf61b643dd1057165044138b99e0)
(cherry picked from commit b245e9ce2f20bb84690bffe902a60d5e96130cdb)
(cherry picked from commit 384b7b71a3d22bcc27bbdb9002ad700015b86eab)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64657cd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64657cd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64657cd6

Branch: refs/heads/branch-2.7
Commit: 64657cd675b6a4f5b77ef4d4237ab569ecd5e514
Parents: c1a6f4f
Author: Zhe Zhang 
Authored: Mon Nov 7 16:08:10 2016 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 7 16:22:16 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../apache/hadoop/metrics2/lib/MutableStat.java  |  4 
 .../apache/hadoop/metrics2/util/SampleStat.java  | 19 +++
 .../hadoop/metrics2/lib/TestMutableMetrics.java  | 17 +
 4 files changed, 39 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64657cd6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c869571..9ed18a7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -90,6 +90,9 @@ Release 2.7.4 - UNRELEASED
 HADOOP-12483. Maintain wrapped SASL ordering for postponed IPC responses.
 (Daryn Sharp via yliu)
 
+HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used.
+(Erik Krogen via zhz)
+
 Release 2.7.3 - 2016-08-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64657cd6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
index f104420..b5d9929 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
@@ -99,6 +99,10 @@ public class MutableStat extends MutableMetric {
 
   /**
* Add a number of samples and their sum to the running stat
+   *
+   * Note that although use of this method will preserve accurate mean values,
+   * large values for numSamples may result in inaccurate variance values due
+   * to the use of a single step of the Welford method for variance 
calculation.
* @param numSamples  number of samples
* @param sum of the samples
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64657cd6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
index 589062a..be00a65 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
@@ -27,29 +27,32 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class SampleStat {
   private final MinMax minmax = new MinMax();
   private long numSamples = 0;
-  private double a0, a1, s0, s1;
+  private double a0, a1, s0, s1, total;
 
   /**
* Construct a new running sample stat
*/
   public SampleStat() {
 a0 = s0 = 0.0;
+total = 0.0;
   }
 
   public void reset() {
 numSamples = 0;
 a0 = s0 = 0.0;
+total = 0.0;
 minmax.reset();
   }
 
   // We want to reuse the object, sometimes.
   void reset(long numSamples, double a0, double a1, double s0, double s1,
- MinMax minmax) {
+  double total, MinMax minmax) {
 this.numSamples = numSamples;
 this.a0 = a0;
 this.a1 = a1;
 this.s0 = s0;
 this.s1 = s1;
+this.total = total;
 this.minmax.reset(minmax);
   }
 
@@ -58,7 +61,7 @@ public class SampleStat {
* @param other the destination to hold our values
*/
   public void copyTo(SampleStat other) {
-

hadoop git commit: HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. Contributed by Erik Krogen.

2016-11-07 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5231c527a -> 384b7b71a


HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. 
Contributed by Erik Krogen.

(cherry picked from commit 3dbad5d823b8bf61b643dd1057165044138b99e0)
(cherry picked from commit b245e9ce2f20bb84690bffe902a60d5e96130cdb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/384b7b71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/384b7b71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/384b7b71

Branch: refs/heads/branch-2.8
Commit: 384b7b71a3d22bcc27bbdb9002ad700015b86eab
Parents: 5231c52
Author: Zhe Zhang 
Authored: Mon Nov 7 16:08:10 2016 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 7 16:15:20 2016 -0800

--
 .../apache/hadoop/metrics2/lib/MutableStat.java  |  4 
 .../apache/hadoop/metrics2/util/SampleStat.java  | 19 +++
 .../hadoop/metrics2/lib/TestMutableMetrics.java  | 17 +
 3 files changed, 36 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/384b7b71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
index 9410c76..ac9f79e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
@@ -99,6 +99,10 @@ public class MutableStat extends MutableMetric {
 
   /**
* Add a number of samples and their sum to the running stat
+   *
+   * Note that although use of this method will preserve accurate mean values,
+   * large values for numSamples may result in inaccurate variance values due
+   * to the use of a single step of the Welford method for variance 
calculation.
* @param numSamples  number of samples
* @param sum of the samples
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/384b7b71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
index cd9aaa4..23abfc4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
@@ -27,29 +27,32 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class SampleStat {
   private final MinMax minmax = new MinMax();
   private long numSamples = 0;
-  private double a0, a1, s0, s1;
+  private double a0, a1, s0, s1, total;
 
   /**
* Construct a new running sample stat
*/
   public SampleStat() {
 a0 = s0 = 0.0;
+total = 0.0;
   }
 
   public void reset() {
 numSamples = 0;
 a0 = s0 = 0.0;
+total = 0.0;
 minmax.reset();
   }
 
   // We want to reuse the object, sometimes.
   void reset(long numSamples, double a0, double a1, double s0, double s1,
- MinMax minmax) {
+  double total, MinMax minmax) {
 this.numSamples = numSamples;
 this.a0 = a0;
 this.a1 = a1;
 this.s0 = s0;
 this.s1 = s1;
+this.total = total;
 this.minmax.reset(minmax);
   }
 
@@ -58,7 +61,7 @@ public class SampleStat {
* @param other the destination to hold our values
*/
   public void copyTo(SampleStat other) {
-other.reset(numSamples, a0, a1, s0, s1, minmax);
+other.reset(numSamples, a0, a1, s0, s1, total, minmax);
   }
 
   /**
@@ -80,6 +83,7 @@ public class SampleStat {
*/
   public SampleStat add(long nSamples, double x) {
 numSamples += nSamples;
+total += x;
 
 if (numSamples == 1) {
   a0 = a1 = x;
@@ -103,10 +107,17 @@ public class SampleStat {
   }
 
   /**
+   * @return the total of all samples added
+   */
+  public double total() {
+return total;
+  }
+
+  /**
* @return  the arithmetic mean of the samples
*/
   public double mean() {
-return numSamples > 0 ? a1 : 0.0;
+return numSamples > 0 ? (total / numSamples) : 0.0;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/384b7b71/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java

hadoop git commit: HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. Contributed by Erik Krogen.

2016-11-07 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6cdcab907 -> b245e9ce2


HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. 
Contributed by Erik Krogen.

(cherry picked from commit 3dbad5d823b8bf61b643dd1057165044138b99e0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b245e9ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b245e9ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b245e9ce

Branch: refs/heads/branch-2
Commit: b245e9ce2f20bb84690bffe902a60d5e96130cdb
Parents: 6cdcab9
Author: Zhe Zhang 
Authored: Mon Nov 7 16:08:10 2016 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 7 16:11:27 2016 -0800

--
 .../apache/hadoop/metrics2/lib/MutableStat.java  |  4 
 .../apache/hadoop/metrics2/util/SampleStat.java  | 19 +++
 .../hadoop/metrics2/lib/TestMutableMetrics.java  | 17 +
 3 files changed, 36 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b245e9ce/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
index 5108624..132f57c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
@@ -102,6 +102,10 @@ public class MutableStat extends MutableMetric {
 
   /**
* Add a number of samples and their sum to the running stat
+   *
+   * Note that although use of this method will preserve accurate mean values,
+   * large values for numSamples may result in inaccurate variance values due
+   * to the use of a single step of the Welford method for variance 
calculation.
* @param numSamples  number of samples
* @param sum of the samples
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b245e9ce/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
index cd9aaa4..23abfc4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
@@ -27,29 +27,32 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class SampleStat {
   private final MinMax minmax = new MinMax();
   private long numSamples = 0;
-  private double a0, a1, s0, s1;
+  private double a0, a1, s0, s1, total;
 
   /**
* Construct a new running sample stat
*/
   public SampleStat() {
 a0 = s0 = 0.0;
+total = 0.0;
   }
 
   public void reset() {
 numSamples = 0;
 a0 = s0 = 0.0;
+total = 0.0;
 minmax.reset();
   }
 
   // We want to reuse the object, sometimes.
   void reset(long numSamples, double a0, double a1, double s0, double s1,
- MinMax minmax) {
+  double total, MinMax minmax) {
 this.numSamples = numSamples;
 this.a0 = a0;
 this.a1 = a1;
 this.s0 = s0;
 this.s1 = s1;
+this.total = total;
 this.minmax.reset(minmax);
   }
 
@@ -58,7 +61,7 @@ public class SampleStat {
* @param other the destination to hold our values
*/
   public void copyTo(SampleStat other) {
-other.reset(numSamples, a0, a1, s0, s1, minmax);
+other.reset(numSamples, a0, a1, s0, s1, total, minmax);
   }
 
   /**
@@ -80,6 +83,7 @@ public class SampleStat {
*/
   public SampleStat add(long nSamples, double x) {
 numSamples += nSamples;
+total += x;
 
 if (numSamples == 1) {
   a0 = a1 = x;
@@ -103,10 +107,17 @@ public class SampleStat {
   }
 
   /**
+   * @return the total of all samples added
+   */
+  public double total() {
+return total;
+  }
+
+  /**
* @return  the arithmetic mean of the samples
*/
   public double mean() {
-return numSamples > 0 ? a1 : 0.0;
+return numSamples > 0 ? (total / numSamples) : 0.0;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b245e9ce/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
--
diff --git 

hadoop git commit: HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. Contributed by Erik Krogen.

2016-11-07 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/trunk de3b4aac5 -> 3dbad5d82


HADOOP-13804. MutableStat mean loses accuracy if add(long, long) is used. 
Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dbad5d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dbad5d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dbad5d8

Branch: refs/heads/trunk
Commit: 3dbad5d823b8bf61b643dd1057165044138b99e0
Parents: de3b4aa
Author: Zhe Zhang 
Authored: Mon Nov 7 16:08:10 2016 -0800
Committer: Zhe Zhang 
Committed: Mon Nov 7 16:08:10 2016 -0800

--
 .../apache/hadoop/metrics2/lib/MutableStat.java  |  4 
 .../apache/hadoop/metrics2/util/SampleStat.java  | 19 +++
 .../hadoop/metrics2/lib/TestMutableMetrics.java  | 17 +
 3 files changed, 36 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dbad5d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
index ae68874..92fe3d1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
@@ -102,6 +102,10 @@ public class MutableStat extends MutableMetric {
 
   /**
* Add a number of samples and their sum to the running stat
+   *
+   * Note that although use of this method will preserve accurate mean values,
+   * large values for numSamples may result in inaccurate variance values due
+   * to the use of a single step of the Welford method for variance 
calculation.
* @param numSamples  number of samples
* @param sum of the samples
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dbad5d8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
index cd9aaa4..23abfc4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
@@ -27,29 +27,32 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class SampleStat {
   private final MinMax minmax = new MinMax();
   private long numSamples = 0;
-  private double a0, a1, s0, s1;
+  private double a0, a1, s0, s1, total;
 
   /**
* Construct a new running sample stat
*/
   public SampleStat() {
 a0 = s0 = 0.0;
+total = 0.0;
   }
 
   public void reset() {
 numSamples = 0;
 a0 = s0 = 0.0;
+total = 0.0;
 minmax.reset();
   }
 
   // We want to reuse the object, sometimes.
   void reset(long numSamples, double a0, double a1, double s0, double s1,
- MinMax minmax) {
+  double total, MinMax minmax) {
 this.numSamples = numSamples;
 this.a0 = a0;
 this.a1 = a1;
 this.s0 = s0;
 this.s1 = s1;
+this.total = total;
 this.minmax.reset(minmax);
   }
 
@@ -58,7 +61,7 @@ public class SampleStat {
* @param other the destination to hold our values
*/
   public void copyTo(SampleStat other) {
-other.reset(numSamples, a0, a1, s0, s1, minmax);
+other.reset(numSamples, a0, a1, s0, s1, total, minmax);
   }
 
   /**
@@ -80,6 +83,7 @@ public class SampleStat {
*/
   public SampleStat add(long nSamples, double x) {
 numSamples += nSamples;
+total += x;
 
 if (numSamples == 1) {
   a0 = a1 = x;
@@ -103,10 +107,17 @@ public class SampleStat {
   }
 
   /**
+   * @return the total of all samples added
+   */
+  public double total() {
+return total;
+  }
+
+  /**
* @return  the arithmetic mean of the samples
*/
   public double mean() {
-return numSamples > 0 ? a1 : 0.0;
+return numSamples > 0 ? (total / numSamples) : 0.0;
   }
 
   /**

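To make the javadoc note and the mean() change above concrete, here is a hedged sketch (not part of the patch) that exercises the batched add() directly on SampleStat, which backs MutableStat. The numbers are made up, and calling SampleStat directly is only for illustration since it is a private-audience class.

import org.apache.hadoop.metrics2.util.SampleStat;

public class BatchedMeanSketch {
  public static void main(String[] args) {
    SampleStat stat = new SampleStat();
    stat.add(10, 100.0);   // 10 samples whose sum is 100
    stat.add(10, 300.0);   // 10 more samples whose sum is 300
    // With this change mean() is total / numSamples, i.e. (100 + 300) / 20 = 20.0
    // exactly, even for batched updates. Previously the mean came from the
    // Welford running value, which is only exact when samples are added one at a time.
    System.out.println(stat.mean());
  }
}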
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dbad5d8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
--
diff --git 

hadoop git commit: YARN-3359. Addendum for Recover collector list when RM fails over (Li Lu via Varun Saxena)

2016-11-07 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 3e4da481d -> 0b89ab247


YARN-3359. Addendum for Recover collector list when RM fails over (Li Lu via 
Varun Saxena)

(cherry picked from commit 25b19178dd53bc0d57ca5fd64d3464af7b59e588)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b89ab24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b89ab24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b89ab24

Branch: refs/heads/YARN-5355-branch-2
Commit: 0b89ab2476721f29d0333e14c5e4242b9b4abbd9
Parents: 3e4da48
Author: Varun Saxena 
Authored: Tue Nov 8 04:01:37 2016 +0530
Committer: Varun Saxena 
Committed: Tue Nov 8 04:17:56 2016 +0530

--
 .../apache/hadoop/yarn/server/nodemanager/Context.java  |  4 ++--
 .../hadoop/yarn/server/nodemanager/NodeManager.java | 12 +++-
 .../server/nodemanager/amrmproxy/BaseAMRMProxyTest.java |  7 +--
 3 files changed, 14 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b89ab24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
index 0d71057..e4466ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
@@ -79,14 +79,14 @@ public interface Context {
* @return registering collectors, or null if the timeline service v.2 is not
* enabled
*/
-  Map getRegisteringCollectors();
+  ConcurrentMap getRegisteringCollectors();
 
   /**
* Get the list of collectors registered with the RM and known by this node.
* @return known collectors, or null if the timeline service v.2 is not
* enabled.
*/
-  Map getKnownCollectors();
+  ConcurrentMap getKnownCollectors();
 
   ConcurrentMap getContainers();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b89ab24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index cde571d..15c3c4d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -490,7 +490,7 @@ public class NodeManager extends CompositeService
 if (knownCollectors == null) {
   return;
 }
-Map registeringCollectors
+ConcurrentMap registeringCollectors
 = context.getRegisteringCollectors();
 for (Map.Entry entry
 : knownCollectors.entrySet()) {
@@ -527,9 +527,10 @@ public class NodeManager extends CompositeService
 protected final ConcurrentMap containers =
 new ConcurrentSkipListMap();
 
-private Map registeringCollectors;
+private ConcurrentMap
+registeringCollectors;
 
-private Map knownCollectors;
+private ConcurrentMap knownCollectors;
 
 protected final ConcurrentMap

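A hedged aside (not from the patch) on why the narrower ConcurrentMap return type matters to callers: collector registration can rely on atomic map operations instead of external locking. The key and value types below are placeholders, since the real generic parameters are elided in the quoted diff.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class CollectorRegistrySketch {
  private final ConcurrentMap<String, String> registeringCollectors =
      new ConcurrentHashMap<>();

  // Atomic: the first registration for an application wins, with no
  // synchronized block around a plain Map.
  void register(String appId, String collectorAddress) {
    registeringCollectors.putIfAbsent(appId, collectorAddress);
  }
}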
hadoop git commit: YARN-3359. Addendum for Recover collector list when RM fails over (Li Lu via Varun Saxena)

2016-11-07 Thread varunsaxena
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 1c8a57550 -> 25b19178d


YARN-3359. Addendum for Recover collector list when RM fails over (Li Lu via 
Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25b19178
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25b19178
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25b19178

Branch: refs/heads/YARN-5355
Commit: 25b19178dd53bc0d57ca5fd64d3464af7b59e588
Parents: 1c8a575
Author: Varun Saxena 
Authored: Tue Nov 8 04:01:37 2016 +0530
Committer: Varun Saxena 
Committed: Tue Nov 8 04:01:37 2016 +0530

--
 .../apache/hadoop/yarn/server/nodemanager/Context.java  |  4 ++--
 .../hadoop/yarn/server/nodemanager/NodeManager.java | 12 +++-
 .../server/nodemanager/amrmproxy/BaseAMRMProxyTest.java |  7 +--
 3 files changed, 14 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25b19178/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
index b92526b..83c994d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java
@@ -79,14 +79,14 @@ public interface Context {
* @return registering collectors, or null if the timeline service v.2 is not
* enabled
*/
-  Map getRegisteringCollectors();
+  ConcurrentMap getRegisteringCollectors();
 
   /**
* Get the list of collectors registered with the RM and known by this node.
* @return known collectors, or null if the timeline service v.2 is not
* enabled.
*/
-  Map getKnownCollectors();
+  ConcurrentMap getKnownCollectors();
 
   ConcurrentMap getContainers();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25b19178/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 30ec282..73f4450 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -489,7 +489,7 @@ public class NodeManager extends CompositeService
 if (knownCollectors == null) {
   return;
 }
-Map registeringCollectors
+ConcurrentMap registeringCollectors
 = context.getRegisteringCollectors();
 for (Map.Entry entry
 : knownCollectors.entrySet()) {
@@ -526,9 +526,10 @@ public class NodeManager extends CompositeService
 protected final ConcurrentMap containers =
 new ConcurrentSkipListMap();
 
-private Map registeringCollectors;
+private ConcurrentMap
+registeringCollectors;
 
-private Map knownCollectors;
+private ConcurrentMap knownCollectors;
 
 protected final ConcurrentMap increasedContainers =
@@ -725,12 +726,13 @@ public class NodeManager extends CompositeService
 }
 
 @Override
-public Map getRegisteringCollectors() {
+public ConcurrentMap

[7/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
YARN-5716. Add global scheduler interface definition and update 
CapacityScheduler to use it. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de3b4aac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de3b4aac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de3b4aac

Branch: refs/heads/trunk
Commit: de3b4aac561258ad242a3c5ed1c919428893fd4c
Parents: acd509d
Author: Jian He 
Authored: Mon Nov 7 10:14:39 2016 -0800
Committer: Jian He 
Committed: Mon Nov 7 10:14:39 2016 -0800

--
 .../dev-support/findbugs-exclude.xml|   9 +
 .../rmcontainer/RMContainer.java|  13 +
 .../rmcontainer/RMContainerImpl.java|  78 +-
 .../scheduler/AppSchedulingInfo.java| 168 +++-
 .../scheduler/SchedulerApplicationAttempt.java  |  73 +-
 .../scheduler/activities/ActivitiesLogger.java  |  17 +-
 .../scheduler/activities/ActivitiesManager.java |   7 +-
 .../scheduler/capacity/AbstractCSQueue.java |  71 ++
 .../scheduler/capacity/CSAssignment.java|  33 +
 .../scheduler/capacity/CSQueue.java |  19 +-
 .../scheduler/capacity/CapacityScheduler.java   | 773 ++-
 .../CapacitySchedulerConfiguration.java |   4 +
 .../scheduler/capacity/LeafQueue.java   | 451 ++-
 .../scheduler/capacity/ParentQueue.java | 428 +-
 .../allocator/AbstractContainerAllocator.java   |  39 +-
 .../capacity/allocator/ContainerAllocation.java |  12 +-
 .../capacity/allocator/ContainerAllocator.java  |  15 +-
 .../allocator/IncreaseContainerAllocator.java   |  89 +--
 .../allocator/RegularContainerAllocator.java| 215 +++---
 .../scheduler/common/AssignmentInformation.java |  44 +-
 .../common/ContainerAllocationProposal.java | 111 +++
 .../common/ResourceAllocationCommitter.java |  29 +
 .../scheduler/common/ResourceCommitRequest.java | 164 
 .../scheduler/common/SchedulerContainer.java|  80 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java | 624 ---
 .../scheduler/fifo/FifoAppAttempt.java  | 110 +++
 .../scheduler/fifo/FifoScheduler.java   |  55 +-
 .../scheduler/placement/PlacementSet.java   |  65 ++
 .../scheduler/placement/PlacementSetUtils.java  |  36 +
 .../placement/ResourceRequestUpdateResult.java  |  43 ++
 .../placement/SchedulingPlacementSet.java   |  90 +++
 .../scheduler/placement/SimplePlacementSet.java |  70 ++
 .../AbstractComparatorOrderingPolicy.java   |   4 +-
 .../scheduler/policy/FairOrderingPolicy.java|   3 +-
 .../scheduler/policy/FifoOrderingPolicy.java|   4 +-
 .../FifoOrderingPolicyForPendingApps.java   |   3 +-
 .../yarn/server/resourcemanager/MockRM.java |  47 +-
 .../resourcemanager/TestClientRMService.java|   2 +-
 .../scheduler/TestSchedulerHealth.java  |   6 +-
 .../capacity/TestCapacityScheduler.java |  56 +-
 .../TestCapacitySchedulerAsyncScheduling.java   | 143 
 .../scheduler/capacity/TestChildQueueOrder.java |  21 +-
 .../capacity/TestContainerAllocation.java   |  45 +-
 .../capacity/TestContainerResizing.java |  10 +-
 .../scheduler/capacity/TestLeafQueue.java   | 647 +++-
 .../scheduler/capacity/TestParentQueue.java | 209 +++--
 .../scheduler/capacity/TestReservations.java| 277 +--
 .../scheduler/capacity/TestUtils.java   |  26 +
 .../TestRMWebServicesSchedulerActivities.java   |   8 +-
 49 files changed, 4212 insertions(+), 1334 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 01b1da7..ab36a4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -574,4 +574,13 @@
     
 
   
+
+
+  
+  
+
+
+
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
 

[4/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
new file mode 100644
index 000..8b4907b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common;
+
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
+
+/**
+ * Contexts for a container inside scheduler
+ */
+public class SchedulerContainer<A extends SchedulerApplicationAttempt,
+    N extends SchedulerNode> {
+  private RMContainer rmContainer;
+  private String nodePartition;
+  private A schedulerApplicationAttempt;
+  private N schedulerNode;
+  private boolean allocated; // Allocated (True) or reserved (False)
+
+  public SchedulerContainer(A app, N node, RMContainer rmContainer,
+  String nodePartition, boolean allocated) {
+this.schedulerApplicationAttempt = app;
+this.schedulerNode = node;
+this.rmContainer = rmContainer;
+this.nodePartition = nodePartition;
+this.allocated = allocated;
+  }
+
+  public String getNodePartition() {
+return nodePartition;
+  }
+
+  public RMContainer getRmContainer() {
+return rmContainer;
+  }
+
+  public A getSchedulerApplicationAttempt() {
+return schedulerApplicationAttempt;
+  }
+
+  public N getSchedulerNode() {
+return schedulerNode;
+  }
+
+  public boolean isAllocated() {
+return allocated;
+  }
+
+  public SchedulerRequestKey getSchedulerRequestKey() {
+if (rmContainer.getState() == RMContainerState.RESERVED) {
+  return rmContainer.getReservedSchedulerKey();
+}
+return rmContainer.getAllocatedSchedulerKey();
+  }
+
+  @Override
+  public String toString() {
+return "(Application=" + schedulerApplicationAttempt
+.getApplicationAttemptId() + "; Node=" + schedulerNode.getNodeID()
++ "; Resource=" + rmContainer.getAllocatedOrReservedResource() + ")";
+  }
+}
\ No newline at end of file
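To make the new class above easier to read at a glance, here is a minimal usage sketch. It is not part of the commit: the FiCaSchedulerApp/FiCaSchedulerNode type arguments and the helper method are illustrative, chosen because those are the application and node types the rest of this patch wires into the CapacityScheduler.

import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;

public class SchedulerContainerSketch {
  // Bundles the placement context of a single allocation: which app attempt,
  // which node, which partition, and whether it is an allocation or a
  // reservation. The 'true' flag below marks an allocation.
  static SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> describe(
      FiCaSchedulerApp app, FiCaSchedulerNode node, RMContainer rmContainer,
      String nodePartition) {
    SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> sc =
        new SchedulerContainer<>(app, node, rmContainer, nodePartition, true);
    // getSchedulerRequestKey() picks the reserved or allocated key of the
    // wrapped RMContainer depending on its state.
    System.out.println(sc + " -> " + sc.getSchedulerRequestKey());
    return sc;
  }
}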

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index ebe70d4..6d9dda8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -18,14 +18,7 @@
 
 package 

[6/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index d759d47..7e98f10 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -32,7 +32,9 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.lang.StringUtils;
@@ -112,7 +114,11 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Alloca
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceAllocationCommitter;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
@@ -128,6 +134,9 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourc
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -142,11 +151,12 @@ import com.google.common.base.Preconditions;
 @SuppressWarnings("unchecked")
 public class CapacityScheduler extends
 AbstractYarnScheduler implements
-PreemptableResourceScheduler, CapacitySchedulerContext, Configurable {
+PreemptableResourceScheduler, CapacitySchedulerContext, Configurable,
+ResourceAllocationCommitter {
 
   private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
   private YarnAuthorizationProvider authorizer;
- 
+
   private CSQueue root;
   // timeout to join when we stop this service
   protected final long THREAD_JOIN_TIMEOUT_MS = 1000;
@@ -155,6 +165,8 @@ public class CapacityScheduler extends
 
   private volatile boolean isLazyPreemptionEnabled = false;
 
+  private int offswitchPerHeartbeatLimit;
+
   static final Comparator nonPartitionedQueueComparator =
   new Comparator() {
 @Override
@@ -176,7 +188,7 @@ public class CapacityScheduler extends
   public void setConf(Configuration conf) {
   yarnConf = conf;
   }
-  
+
   private void validateConf(Configuration conf) {
 // validate scheduler memory allocation setting
 int minMem = conf.getInt(
@@ -229,7 +241,8 @@ public class CapacityScheduler extends
   private boolean usePortForNodeName;
 
   private boolean scheduleAsynchronously;
-  private AsyncScheduleThread asyncSchedulerThread;
+  private List asyncSchedulerThreads;
+  private 
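The hunk is truncated by the archive at this point, but the visible part shows CapacityScheduler replacing its single AsyncScheduleThread with a list of threads and implementing the new ResourceAllocationCommitter interface. As a rough sketch of how this multi-threaded asynchronous scheduling is switched on, mirroring the configuration used by TestCapacitySchedulerAsyncScheduling later in this thread (the 100 ms interval is only an example value):

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

public class AsyncSchedulingConfSketch {
  // Builds a YarnConfiguration with heartbeat-independent capacity scheduling
  // enabled on several threads. The interval key is assembled from the prefix
  // exactly as the new test does.
  static YarnConfiguration asyncSchedulingConf(int numThreads) {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
        ResourceScheduler.class);
    conf.setBoolean(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
    conf.setInt(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD,
        numThreads);
    conf.setInt(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
        + ".scheduling-interval-ms", 100);
    return conf;
  }
}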

[3/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
new file mode 100644
index 000..b2cf805
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class TestCapacitySchedulerAsyncScheduling {
+  private final int GB = 1024;
+
+  private YarnConfiguration conf;
+
+  RMNodeLabelsManager mgr;
+
+  @Before
+  public void setUp() throws Exception {
+conf = new YarnConfiguration();
+conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ResourceScheduler.class);
+conf.setBoolean(
+CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
+mgr = new NullRMNodeLabelsManager();
+mgr.init(conf);
+  }
+
+  @Test(timeout = 30)
+  public void testSingleThreadAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(1);
+  }
+
+  @Test(timeout = 30)
+  public void testTwoThreadsAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(2);
+  }
+
+  @Test(timeout = 30)
+  public void testThreeThreadsAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(3);
+  }
+
+  public void testAsyncContainerAllocation(int numThreads) throws Exception {
+conf.setInt(
+CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD,
+numThreads);
+conf.setInt(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
++ ".scheduling-interval-ms", 100);
+
+final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+mgr.init(conf);
+
+// inject node label manager
+MockRM rm = new MockRM(TestUtils.getConfigurationWithMultipleQueues(conf)) 
{
+  @Override
+  public RMNodeLabelsManager createNodeLabelManager() {
+return mgr;
+  }
+};
+
+rm.getRMContext().setNodeLabelManager(mgr);
+rm.start();
+
+List<MockNM> nms = new ArrayList<>();
+// Add 10 nodes to the cluster, in the cluster we have 200 GB resource
+for (int i = 0; i < 10; i++) {
+  nms.add(rm.registerNode("h-" + i + ":1234", 20 * GB));
+}
+
+List<MockAM> ams = new ArrayList();
+// Add 3 applications to the cluster, one app in one queue
+// the i-th app ask (20 * i) containers. So in total we will have
+// 123G container allocated
+int totalAsked = 3 * GB; // 3 AMs
+
+for (int i = 0; i < 3; i++) {
+  RMApp rmApp = rm.submitApp(1024, "app", "user", null, false,
+  

[7/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
YARN-5716. Add global scheduler interface definition and update 
CapacityScheduler to use it. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cdcab90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cdcab90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cdcab90

Branch: refs/heads/branch-2
Commit: 6cdcab9078e231a460e36f795c36345e09ee8200
Parents: 29c6a0b
Author: Jian He 
Authored: Mon Nov 7 10:14:39 2016 -0800
Committer: Jian He 
Committed: Mon Nov 7 10:31:43 2016 -0800

--
 .../dev-support/findbugs-exclude.xml|   9 +
 .../rmcontainer/RMContainer.java|  13 +
 .../rmcontainer/RMContainerImpl.java|  78 +-
 .../scheduler/AppSchedulingInfo.java| 168 +++-
 .../scheduler/SchedulerApplicationAttempt.java  |  73 +-
 .../scheduler/activities/ActivitiesLogger.java  |  17 +-
 .../scheduler/activities/ActivitiesManager.java |   7 +-
 .../scheduler/capacity/AbstractCSQueue.java |  71 ++
 .../scheduler/capacity/CSAssignment.java|  33 +
 .../scheduler/capacity/CSQueue.java |  19 +-
 .../scheduler/capacity/CapacityScheduler.java   | 773 ++-
 .../CapacitySchedulerConfiguration.java |   4 +
 .../scheduler/capacity/LeafQueue.java   | 451 ++-
 .../scheduler/capacity/ParentQueue.java | 428 +-
 .../allocator/AbstractContainerAllocator.java   |  39 +-
 .../capacity/allocator/ContainerAllocation.java |  12 +-
 .../capacity/allocator/ContainerAllocator.java  |  15 +-
 .../allocator/IncreaseContainerAllocator.java   |  89 +--
 .../allocator/RegularContainerAllocator.java| 215 +++---
 .../scheduler/common/AssignmentInformation.java |  42 +-
 .../common/ContainerAllocationProposal.java | 111 +++
 .../common/ResourceAllocationCommitter.java |  29 +
 .../scheduler/common/ResourceCommitRequest.java | 164 
 .../scheduler/common/SchedulerContainer.java|  80 ++
 .../scheduler/common/fica/FiCaSchedulerApp.java | 623 ---
 .../scheduler/fifo/FifoAppAttempt.java  | 110 +++
 .../scheduler/fifo/FifoScheduler.java   |  55 +-
 .../scheduler/placement/PlacementSet.java   |  65 ++
 .../scheduler/placement/PlacementSetUtils.java  |  36 +
 .../placement/ResourceRequestUpdateResult.java  |  43 ++
 .../placement/SchedulingPlacementSet.java   |  90 +++
 .../scheduler/placement/SimplePlacementSet.java |  70 ++
 .../AbstractComparatorOrderingPolicy.java   |   4 +-
 .../scheduler/policy/FairOrderingPolicy.java|   3 +-
 .../scheduler/policy/FifoOrderingPolicy.java|   4 +-
 .../FifoOrderingPolicyForPendingApps.java   |   3 +-
 .../yarn/server/resourcemanager/MockRM.java |  47 +-
 .../resourcemanager/TestClientRMService.java|   2 +-
 .../scheduler/TestSchedulerHealth.java  |   6 +-
 .../capacity/TestCapacityScheduler.java |  56 +-
 .../TestCapacitySchedulerAsyncScheduling.java   | 144 
 .../scheduler/capacity/TestChildQueueOrder.java |  21 +-
 .../capacity/TestContainerAllocation.java   |  45 +-
 .../capacity/TestContainerResizing.java |  10 +-
 .../scheduler/capacity/TestLeafQueue.java   | 647 +++-
 .../scheduler/capacity/TestParentQueue.java | 209 +++--
 .../scheduler/capacity/TestReservations.java| 277 +--
 .../scheduler/capacity/TestUtils.java   |  26 +
 .../TestRMWebServicesSchedulerActivities.java   |   8 +-
 49 files changed, 4212 insertions(+), 1332 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 2be45e5..a2c5562 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -547,4 +547,13 @@
     
 
   
+
+
+  
+  
+
+
+
+  
+
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainer.java
 

[5/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index a69af6e..fd0c68b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -47,8 +47,13 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import java.io.IOException;
@@ -71,12 +76,10 @@ public class ParentQueue extends AbstractCSQueue {
 
   protected final Set childQueues;  
   private final boolean rootQueue;
-  final Comparator nonPartitionedQueueComparator;
-  final PartitionedQueueComparator partitionQueueComparator;
-  volatile int numApplications;
+  private final Comparator nonPartitionedQueueComparator;
+  private final PartitionedQueueComparator partitionQueueComparator;
+  private volatile int numApplications;
   private final CapacitySchedulerContext scheduler;
-  private boolean needToResortQueuesAtNextAllocation = false;
-  private int offswitchPerHeartbeatLimit;
 
   private final RecordFactory recordFactory = 
 RecordFactoryProvider.getRecordFactory(null);
@@ -86,7 +89,7 @@ public class ParentQueue extends AbstractCSQueue {
 super(cs, queueName, parent, old);
 this.scheduler = cs;
 this.nonPartitionedQueueComparator = cs.getNonPartitionedQueueComparator();
-this.partitionQueueComparator = cs.getPartitionedQueueComparator();
+this.partitionQueueComparator = new PartitionedQueueComparator();
 
 this.rootQueue = (parent == null);
 
@@ -126,16 +129,12 @@ public class ParentQueue extends AbstractCSQueue {
 }
   }
 
-  offswitchPerHeartbeatLimit =
-csContext.getConfiguration().getOffSwitchPerHeartbeatLimit();
-
   LOG.info(queueName + ", capacity=" + this.queueCapacities.getCapacity()
   + ", absoluteCapacity=" + this.queueCapacities.getAbsoluteCapacity()
   + ", maxCapacity=" + this.queueCapacities.getMaximumCapacity()
   + ", absoluteMaxCapacity=" + this.queueCapacities
   .getAbsoluteMaximumCapacity() + ", state=" + state + ", acls="
   + aclsString + ", labels=" + labelStrBuilder.toString() + "\n"
-  + ", offswitchPerHeartbeatLimit = " + getOffSwitchPerHeartbeatLimit()
   + ", reservationsContinueLooking=" + reservationsContinueLooking);
 } finally {
   writeLock.unlock();
@@ -215,11 +214,6 @@ public class ParentQueue extends AbstractCSQueue {
 
   }
 
-  @Private
-  public int getOffSwitchPerHeartbeatLimit() {
-return offswitchPerHeartbeatLimit;
-  }
-
   private QueueUserACLInfo getUserAclInfo(
   UserGroupInformation user) {
 try {
@@ -435,156 +429,145 @@ public class ParentQueue extends AbstractCSQueue {
 
   @Override
   public CSAssignment assignContainers(Resource clusterResource,
-  FiCaSchedulerNode node, ResourceLimits resourceLimits,
-  SchedulingMode schedulingMode) {
-int offswitchCount = 0;
-try {
-  writeLock.lock();
-  // if our queue cannot access this node, just return
-  if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY
-  && 

[6/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index fd31a2d..09b59ee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -32,7 +32,9 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.lang.StringUtils;
@@ -113,7 +115,11 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Alloca
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.KillableContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceAllocationCommitter;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
@@ -129,6 +135,9 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeResourc
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SimplePlacementSet;
 import 
org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.Lock;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -143,11 +152,12 @@ import com.google.common.base.Preconditions;
 @SuppressWarnings("unchecked")
 public class CapacityScheduler extends
 AbstractYarnScheduler implements
-PreemptableResourceScheduler, CapacitySchedulerContext, Configurable {
+PreemptableResourceScheduler, CapacitySchedulerContext, Configurable,
+ResourceAllocationCommitter {
 
   private static final Log LOG = LogFactory.getLog(CapacityScheduler.class);
   private YarnAuthorizationProvider authorizer;
- 
+
   private CSQueue root;
   // timeout to join when we stop this service
   protected final long THREAD_JOIN_TIMEOUT_MS = 1000;
@@ -156,6 +166,8 @@ public class CapacityScheduler extends
 
   private volatile boolean isLazyPreemptionEnabled = false;
 
+  private int offswitchPerHeartbeatLimit;
+
   static final Comparator nonPartitionedQueueComparator =
   new Comparator() {
 @Override
@@ -177,7 +189,7 @@ public class CapacityScheduler extends
   public void setConf(Configuration conf) {
   yarnConf = conf;
   }
-  
+
   private void validateConf(Configuration conf) {
 // validate scheduler memory allocation setting
 int minMem = conf.getInt(
@@ -230,7 +242,8 @@ public class CapacityScheduler extends
   private boolean usePortForNodeName;
 
   private boolean scheduleAsynchronously;
-  private AsyncScheduleThread asyncSchedulerThread;
+  private List asyncSchedulerThreads;
+  private 

[3/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
new file mode 100644
index 000..9854a15
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAsyncScheduling.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class TestCapacitySchedulerAsyncScheduling {
+  private final int GB = 1024;
+
+  private YarnConfiguration conf;
+
+  RMNodeLabelsManager mgr;
+
+  @Before
+  public void setUp() throws Exception {
+conf = new YarnConfiguration();
+conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+ResourceScheduler.class);
+conf.setBoolean(
+CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
+mgr = new NullRMNodeLabelsManager();
+mgr.init(conf);
+  }
+
+  @Test(timeout = 30)
+  public void testSingleThreadAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(1);
+  }
+
+  @Test(timeout = 30)
+  public void testTwoThreadsAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(2);
+  }
+
+  @Test(timeout = 30)
+  public void testThreeThreadsAsyncContainerAllocation() throws Exception {
+testAsyncContainerAllocation(3);
+  }
+
+  public void testAsyncContainerAllocation(int numThreads) throws Exception {
+conf.setInt(
+CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_THREAD,
+numThreads);
+conf.setInt(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
++ ".scheduling-interval-ms", 100);
+
+final RMNodeLabelsManager mgr = new NullRMNodeLabelsManager();
+mgr.init(conf);
+
+// inject node label manager
+MockRM rm = new MockRM(TestUtils.getConfigurationWithMultipleQueues(conf)) 
{
+  @Override
+  public RMNodeLabelsManager createNodeLabelManager() {
+return mgr;
+  }
+};
+
+rm.getRMContext().setNodeLabelManager(mgr);
+rm.start();
+
+List<MockNM> nms = new ArrayList<>();
+// Add 10 nodes to the cluster, in the cluster we have 200 GB resource
+for (int i = 0; i < 10; i++) {
+  nms.add(rm.registerNode("h-" + i + ":1234", 20 * GB));
+}
+
+List<MockAM> ams = new ArrayList<>();
+// Add 3 applications to the cluster, one app in one queue
+// the i-th app ask (20 * i) containers. So in total we will have
+// 123G container allocated
+int totalAsked = 3 * GB; // 3 AMs
+
+for (int i = 0; i < 3; i++) {
+  RMApp rmApp = rm.submitApp(1024, "app", "user", null, false,
+  Character.toString((char) (i % 34 + 97)), 1, null, null, false);
+  

[2/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 684018c..ed47c30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -41,6 +41,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CyclicBarrier;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
@@ -78,6 +80,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestK
 
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue.User;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
@@ -196,6 +199,7 @@ public class TestLeafQueue {
 
 cs.setRMContext(spyRMContext);
 cs.init(csConf);
+cs.setResourceCalculator(rC);
 cs.start();
 
 when(spyRMContext.getScheduler()).thenReturn(cs);
@@ -268,6 +272,12 @@ public class TestLeafQueue {
 any(Resource.class), any(FiCaSchedulerApp.class), 
any(FiCaSchedulerNode.class), 
 any(RMContainer.class), any(ContainerStatus.class), 
 any(RMContainerEventType.class), any(CSQueue.class), anyBoolean());
+
+// Stub out parent queue's accept and apply.
+doReturn(true).when(parent).accept(any(Resource.class),
+any(ResourceCommitRequest.class));
+doNothing().when(parent).apply(any(Resource.class),
+any(ResourceCommitRequest.class));
 
 return queue;
   }
@@ -339,6 +349,12 @@ public class TestLeafQueue {
 FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0,
 8*GB);
 
+Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
+app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(),
+app_1);
+Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(),
+node_0);
+
 final int numNodes = 1;
 Resource clusterResource = 
 Resources.createResource(numNodes * (8*GB), numNodes * 16);
@@ -353,8 +369,10 @@ public class TestLeafQueue {
 // Start testing...
 
 // Only 1 container
-a.assignContainers(clusterResource, node_0, new ResourceLimits(
-clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+applyCSAssignment(clusterResource,
+a.assignContainers(clusterResource, node_0,
+new ResourceLimits(clusterResource),
+SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
 assertEquals(
 (int)(node_0.getTotalResource().getMemorySize() * a.getCapacity()) - 
(1*GB),
 a.getMetrics().getAvailableMB());
@@ -526,6 +544,12 @@ public class TestLeafQueue {
 FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0,
 8*GB);
 
+Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
+app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(),
+app_1);
+Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(),
+node_0);
+
 final int numNodes = 

[1/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 29c6a0be0 -> 6cdcab907


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
index 42a8872..d875969 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
@@ -49,8 +49,12 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -123,6 +127,27 @@ public class TestParentQueue {
 return application;
   }
 
+  private void applyAllocationToQueue(Resource clusterResource,
+  int allocatedMem,
+  CSQueue queue) {
+// Call accept & apply for queue
+ResourceCommitRequest request = mock(ResourceCommitRequest.class);
+when(request.anythingAllocatedOrReserved()).thenReturn(true);
+ContainerAllocationProposal allocation = mock(
+ContainerAllocationProposal.class);
+when(request.getTotalReleasedResource()).thenReturn(Resources.none());
+
when(request.getFirstAllocatedOrReservedContainer()).thenReturn(allocation);
+SchedulerContainer scontainer = mock(SchedulerContainer.class);
+when(allocation.getAllocatedOrReservedContainer()).thenReturn(scontainer);
+when(allocation.getAllocatedOrReservedResource()).thenReturn(
+Resources.createResource(allocatedMem));
+when(scontainer.getNodePartition()).thenReturn("");
+
+if (queue.accept(clusterResource, request)) {
+  queue.apply(clusterResource, request);
+}
+  }
+
   private void stubQueueAllocation(final CSQueue queue, 
   final Resource clusterResource, final FiCaSchedulerNode node, 
   final int allocation) {
@@ -157,7 +182,7 @@ public class TestParentQueue {
 // Next call - nothing
 if (allocation > 0) {
   doReturn(new CSAssignment(Resources.none(), type)).when(queue)
-  .assignContainers(eq(clusterResource), eq(node),
+  .assignContainers(eq(clusterResource), any(PlacementSet.class),
   any(ResourceLimits.class), any(SchedulingMode.class));
 
   // Mock the node's resource availability
@@ -168,7 +193,7 @@ public class TestParentQueue {
 
 return new CSAssignment(allocatedResource, type);
   }
-}).when(queue).assignContainers(eq(clusterResource), eq(node),
+}).when(queue).assignContainers(eq(clusterResource), 
any(PlacementSet.class),
 any(ResourceLimits.class), any(SchedulingMode.class));
   }
   
@@ -205,8 +230,8 @@ public class TestParentQueue {
 setupSingleLevelQueues(csConf);
 
 Map queues = new HashMap();
-CSQueue root = 
-CapacityScheduler.parseQueue(csContext, csConf, null, 
+CSQueue root =
+CapacityScheduler.parseQueue(csContext, csConf, null,
 CapacitySchedulerConfiguration.ROOT, queues, queues, 
 TestUtils.spyHook);
 
@@ -245,13 +270,18 @@ public class TestParentQueue {
 // Now, A should get the scheduling opportunity since A=0G/6G, B=1G/14G
 stubQueueAllocation(a, clusterResource, node_1, 2*GB);
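For readers following the new accept/apply protocol, the applyAllocationToQueue helper above mocks a ResourceCommitRequest and then drives the two-phase commit against a CSQueue. A minimal non-mocked sketch of the same flow follows; the FiCaSchedulerApp/FiCaSchedulerNode type arguments are taken from this patch's imports, and the helper method itself is illustrative rather than part of the commit.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;

public class CommitFlowSketch {
  // Two-phase commit of an allocation proposal against a queue: accept()
  // re-validates the proposal against current queue limits, apply() makes the
  // allocation (or reservation) visible. Returns true if committed.
  static boolean tryCommit(Resource clusterResource, CSQueue queue,
      ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request) {
    if (queue.accept(clusterResource, request)) {
      queue.apply(clusterResource, request);
      return true;
    }
    return false;
  }
}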
 

[5/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index a69af6e..fd0c68b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -47,8 +47,13 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.Activi
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesLogger;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivityState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.AllocationState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSetUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import java.io.IOException;
@@ -71,12 +76,10 @@ public class ParentQueue extends AbstractCSQueue {
 
   protected final Set childQueues;  
   private final boolean rootQueue;
-  final Comparator nonPartitionedQueueComparator;
-  final PartitionedQueueComparator partitionQueueComparator;
-  volatile int numApplications;
+  private final Comparator nonPartitionedQueueComparator;
+  private final PartitionedQueueComparator partitionQueueComparator;
+  private volatile int numApplications;
   private final CapacitySchedulerContext scheduler;
-  private boolean needToResortQueuesAtNextAllocation = false;
-  private int offswitchPerHeartbeatLimit;
 
   private final RecordFactory recordFactory = 
 RecordFactoryProvider.getRecordFactory(null);
@@ -86,7 +89,7 @@ public class ParentQueue extends AbstractCSQueue {
 super(cs, queueName, parent, old);
 this.scheduler = cs;
 this.nonPartitionedQueueComparator = cs.getNonPartitionedQueueComparator();
-this.partitionQueueComparator = cs.getPartitionedQueueComparator();
+this.partitionQueueComparator = new PartitionedQueueComparator();
 
 this.rootQueue = (parent == null);
 
@@ -126,16 +129,12 @@ public class ParentQueue extends AbstractCSQueue {
 }
   }
 
-  offswitchPerHeartbeatLimit =
-csContext.getConfiguration().getOffSwitchPerHeartbeatLimit();
-
   LOG.info(queueName + ", capacity=" + this.queueCapacities.getCapacity()
   + ", absoluteCapacity=" + this.queueCapacities.getAbsoluteCapacity()
   + ", maxCapacity=" + this.queueCapacities.getMaximumCapacity()
   + ", absoluteMaxCapacity=" + this.queueCapacities
   .getAbsoluteMaximumCapacity() + ", state=" + state + ", acls="
   + aclsString + ", labels=" + labelStrBuilder.toString() + "\n"
-  + ", offswitchPerHeartbeatLimit = " + getOffSwitchPerHeartbeatLimit()
   + ", reservationsContinueLooking=" + reservationsContinueLooking);
 } finally {
   writeLock.unlock();
@@ -215,11 +214,6 @@ public class ParentQueue extends AbstractCSQueue {
 
   }
 
-  @Private
-  public int getOffSwitchPerHeartbeatLimit() {
-return offswitchPerHeartbeatLimit;
-  }
-
   private QueueUserACLInfo getUserAclInfo(
   UserGroupInformation user) {
 try {
@@ -435,156 +429,145 @@ public class ParentQueue extends AbstractCSQueue {
 
   @Override
   public CSAssignment assignContainers(Resource clusterResource,
-  FiCaSchedulerNode node, ResourceLimits resourceLimits,
-  SchedulingMode schedulingMode) {
-int offswitchCount = 0;
-try {
-  writeLock.lock();
-  // if our queue cannot access this node, just return
-  if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY
-  && 

[4/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
new file mode 100644
index 000..8b4907b
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/SchedulerContainer.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common;
+
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
+
+/**
+ * Contexts for a container inside scheduler
+ */
+public class SchedulerContainer<A extends SchedulerApplicationAttempt,
+    N extends SchedulerNode> {
+  private RMContainer rmContainer;
+  private String nodePartition;
+  private A schedulerApplicationAttempt;
+  private N schedulerNode;
+  private boolean allocated; // Allocated (True) or reserved (False)
+
+  public SchedulerContainer(A app, N node, RMContainer rmContainer,
+  String nodePartition, boolean allocated) {
+this.schedulerApplicationAttempt = app;
+this.schedulerNode = node;
+this.rmContainer = rmContainer;
+this.nodePartition = nodePartition;
+this.allocated = allocated;
+  }
+
+  public String getNodePartition() {
+return nodePartition;
+  }
+
+  public RMContainer getRmContainer() {
+return rmContainer;
+  }
+
+  public A getSchedulerApplicationAttempt() {
+return schedulerApplicationAttempt;
+  }
+
+  public N getSchedulerNode() {
+return schedulerNode;
+  }
+
+  public boolean isAllocated() {
+return allocated;
+  }
+
+  public SchedulerRequestKey getSchedulerRequestKey() {
+if (rmContainer.getState() == RMContainerState.RESERVED) {
+  return rmContainer.getReservedSchedulerKey();
+}
+return rmContainer.getAllocatedSchedulerKey();
+  }
+
+  @Override
+  public String toString() {
+return "(Application=" + schedulerApplicationAttempt
+.getApplicationAttemptId() + "; Node=" + schedulerNode.getNodeID()
++ "; Resource=" + rmContainer.getAllocatedOrReservedResource() + ")";
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cdcab90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 6f4b1bf..6d9dda8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -19,14 +19,6 @@
 package 

[1/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk acd509dc5 -> de3b4aac5


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
index 42a8872..d875969 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java
@@ -49,8 +49,12 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsMana
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.PlacementSet;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
@@ -123,6 +127,27 @@ public class TestParentQueue {
 return application;
   }
 
+  private void applyAllocationToQueue(Resource clusterResource,
+      int allocatedMem,
+      CSQueue queue) {
+    // Call accept & apply for queue
+    ResourceCommitRequest request = mock(ResourceCommitRequest.class);
+    when(request.anythingAllocatedOrReserved()).thenReturn(true);
+    ContainerAllocationProposal allocation = mock(
+        ContainerAllocationProposal.class);
+    when(request.getTotalReleasedResource()).thenReturn(Resources.none());
+    when(request.getFirstAllocatedOrReservedContainer()).thenReturn(allocation);
+    SchedulerContainer scontainer = mock(SchedulerContainer.class);
+    when(allocation.getAllocatedOrReservedContainer()).thenReturn(scontainer);
+    when(allocation.getAllocatedOrReservedResource()).thenReturn(
+        Resources.createResource(allocatedMem));
+    when(scontainer.getNodePartition()).thenReturn("");
+
+    if (queue.accept(clusterResource, request)) {
+      queue.apply(clusterResource, request);
+    }
+  }
+
   private void stubQueueAllocation(final CSQueue queue, 
   final Resource clusterResource, final FiCaSchedulerNode node, 
   final int allocation) {
@@ -157,7 +182,7 @@ public class TestParentQueue {
 // Next call - nothing
 if (allocation > 0) {
   doReturn(new CSAssignment(Resources.none(), type)).when(queue)
-  .assignContainers(eq(clusterResource), eq(node),
+  .assignContainers(eq(clusterResource), any(PlacementSet.class),
   any(ResourceLimits.class), any(SchedulingMode.class));
 
   // Mock the node's resource availability
@@ -168,7 +193,7 @@ public class TestParentQueue {
 
 return new CSAssignment(allocatedResource, type);
   }
-}).when(queue).assignContainers(eq(clusterResource), eq(node),
+}).when(queue).assignContainers(eq(clusterResource), 
any(PlacementSet.class),
 any(ResourceLimits.class), any(SchedulingMode.class));
   }
   
@@ -205,8 +230,8 @@ public class TestParentQueue {
 setupSingleLevelQueues(csConf);
 
 Map queues = new HashMap();
-CSQueue root = 
-CapacityScheduler.parseQueue(csContext, csConf, null, 
+CSQueue root =
+CapacityScheduler.parseQueue(csContext, csConf, null,
 CapacitySchedulerConfiguration.ROOT, queues, queues, 
 TestUtils.spyHook);
 
@@ -245,13 +270,18 @@ public class TestParentQueue {
 // Now, A should get the scheduling opportunity since A=0G/6G, B=1G/14G
 stubQueueAllocation(a, clusterResource, node_1, 2*GB);
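
The applyAllocationToQueue helper above exercises the two-phase commit path this patch adds to the queues: the scheduler first offers a ResourceCommitRequest to a queue via accept(), and only a proposal that passes the queue's checks is committed via apply(). A rough, hypothetical sketch of that handshake outside the test (buildProposal() and LOG are placeholders, not APIs from this patch):

    // Two-phase commit of an allocation proposal against a CapacityScheduler queue.
    ResourceCommitRequest request = buildProposal();   // placeholder for proposal creation
    if (queue.accept(clusterResource, request)) {
      queue.apply(clusterResource, request);           // commit: update queue and app state
    } else {
      // A rejected proposal is simply dropped here; nothing was applied to the queue.
      LOG.info("Queue " + queue.getQueueName() + " rejected " + request);
    }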
 

[2/7] hadoop git commit: YARN-5716. Add global scheduler interface definition and update CapacityScheduler to use it. Contributed by Wangda Tan

2016-11-07 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3b4aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 51b567b..8694efb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -41,6 +41,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CyclicBarrier;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
@@ -78,6 +80,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestK
 
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue.User;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
@@ -196,6 +199,7 @@ public class TestLeafQueue {
 
 cs.setRMContext(spyRMContext);
 cs.init(csConf);
+cs.setResourceCalculator(rC);
 cs.start();
 
 when(spyRMContext.getScheduler()).thenReturn(cs);
@@ -268,6 +272,12 @@ public class TestLeafQueue {
 any(Resource.class), any(FiCaSchedulerApp.class), 
any(FiCaSchedulerNode.class), 
 any(RMContainer.class), any(ContainerStatus.class), 
 any(RMContainerEventType.class), any(CSQueue.class), anyBoolean());
+
+// Stub out parent queue's accept and apply.
+doReturn(true).when(parent).accept(any(Resource.class),
+any(ResourceCommitRequest.class));
+doNothing().when(parent).apply(any(Resource.class),
+any(ResourceCommitRequest.class));
 
 return queue;
   }
@@ -339,6 +349,12 @@ public class TestLeafQueue {
 FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0,
 8*GB);
 
+    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
+        app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(),
+        app_1);
+    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(),
+        node_0);
+
 final int numNodes = 1;
 Resource clusterResource = 
 Resources.createResource(numNodes * (8*GB), numNodes * 16);
@@ -353,8 +369,10 @@ public class TestLeafQueue {
 // Start testing...
 
 // Only 1 container
-a.assignContainers(clusterResource, node_0, new ResourceLimits(
-clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+applyCSAssignment(clusterResource,
+a.assignContainers(clusterResource, node_0,
+new ResourceLimits(clusterResource),
+SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
 assertEquals(
 (int)(node_0.getTotalResource().getMemorySize() * a.getCapacity()) - 
(1*GB),
 a.getMetrics().getAvailableMB());
@@ -526,6 +544,12 @@ public class TestLeafQueue {
 FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0,
 8*GB);
 
+    Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(
+        app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(),
+        app_1);
+    Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(),
+        node_0);
+
 final int numNodes = 

hadoop git commit: HADOOP-13795. Skip testGlobStatusThrowsExceptionForUnreadableDir in TestFSMainOperationsSwift. Contributed by John Zhuge.

2016-11-07 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk f76895573 -> acd509dc5


HADOOP-13795. Skip testGlobStatusThrowsExceptionForUnreadableDir in 
TestFSMainOperationsSwift. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/acd509dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/acd509dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/acd509dc

Branch: refs/heads/trunk
Commit: acd509dc57d6b8b3791a8332fec9bdf53a8f9d36
Parents: f768955
Author: Xiao Chen 
Authored: Mon Nov 7 09:21:01 2016 -0800
Committer: Xiao Chen 
Committed: Mon Nov 7 09:21:01 2016 -0800

--
 .../org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/acd509dc/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
--
diff --git 
a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
 
b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
index 74299df..b595f1c 100644
--- 
a/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
+++ 
b/hadoop-tools/hadoop-openstack/src/test/java/org/apache/hadoop/fs/swift/TestFSMainOperationsSwift.java
@@ -76,6 +76,12 @@ public class TestFSMainOperationsSwift extends 
FSMainOperationsBaseTest {
 
   @Test(timeout = SWIFT_TEST_TIMEOUT)
   @Override
+  public void testGlobStatusThrowsExceptionForUnreadableDir() {
+SwiftTestUtils.skip("unsupported");
+  }
+
+  @Test(timeout = SWIFT_TEST_TIMEOUT)
+  @Override
   public void testFsStatus() throws Exception {
 super.testFsStatus();
   }





hadoop git commit: HDFS-11099: libhdfs++: Expose rack id in hdfsDNInfo. Contributed by Xiaowei Zhu.

2016-11-07 Thread jhc
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8707 4f3696d7e -> c252ac225


HDFS-11099: libhdfs++: Expose rack id in hdfsDNInfo.  Contributed by Xiaowei 
Zhu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c252ac22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c252ac22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c252ac22

Branch: refs/heads/HDFS-8707
Commit: c252ac22560e957f2b5bc0542514f3a7813886d2
Parents: 4f3696d
Author: James 
Authored: Mon Nov 7 10:41:59 2016 -0500
Committer: James 
Committed: Mon Nov 7 10:41:59 2016 -0500

--
 .../main/native/libhdfspp/include/hdfspp/block_location.h   | 9 +
 .../src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h | 1 +
 .../src/main/native/libhdfspp/lib/bindings/c/hdfs.cc| 5 +
 .../src/main/native/libhdfspp/lib/fs/filesystem.cc  | 2 ++
 .../src/main/native/libhdfspp/tests/hdfs_ext_test.cc| 1 +
 5 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c252ac22/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h
index cbe34be..5a03f41 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/block_location.h
@@ -41,6 +41,14 @@ public:
 this->ip_addr = ip_addr;
   }
 
+  std::string getNetworkLocation() const {
+return network_location;
+  }
+
+  void setNetworkLocation(const std::string & location) {
+this->network_location = location;
+  }
+
   int getXferPort() const {
 return xfer_port;
   }
@@ -75,6 +83,7 @@ public:
 private:
   std::string hostname;
   std::string ip_addr;
+  std::string network_location;
   int xfer_port;
   int info_port;
   int IPC_port;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c252ac22/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
index b41857c..72434e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/include/hdfspp/hdfs_ext.h
@@ -132,6 +132,7 @@ int hdfsBuilderConfGetLong(struct hdfsBuilder *bld, const 
char *key, int64_t *va
 struct hdfsDNInfo {
   const char *ip_address;
   const char *hostname;
+  const char *network_location;
   int xfer_port;
   int info_port;
   int IPC_port;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c252ac22/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
index a43d94f..dd7d00c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/lib/bindings/c/hdfs.cc
@@ -1248,6 +1248,10 @@ int hdfsGetBlockLocations(hdfsFS fs, const char *path, 
struct hdfsBlockLocations
 buf = new char[ppDNInfo.getIPAddr().size() + 1];
 strncpy(buf, ppDNInfo.getIPAddr().c_str(), ppDNInfo.getIPAddr().size() 
+ 1);
 dn_info->ip_address = buf;
+
+buf = new char[ppDNInfo.getNetworkLocation().size() + 1];
+strncpy(buf, ppDNInfo.getNetworkLocation().c_str(), 
ppDNInfo.getNetworkLocation().size() + 1);
+dn_info->network_location = buf;
   }
 }
 
@@ -1270,6 +1274,7 @@ int hdfsFreeBlockLocations(struct hdfsBlockLocations * 
blockLocations) {
   auto location = >locations[j];
   delete[] location->hostname;
   delete[] location->ip_address;
+  delete[] location->network_location;
 }
   }
   delete[] blockLocations->blocks;


hadoop git commit: HADOOP-13797 Remove hardcoded absolute path for ls. Contributed by Christine Koppelt

2016-11-07 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3b279fe42 -> 29c6a0be0


HADOOP-13797 Remove hardcoded absolute path for ls. Contributed by Christine 
Koppelt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/29c6a0be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/29c6a0be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/29c6a0be

Branch: refs/heads/branch-2
Commit: 29c6a0be0d81f261c4ea39373186fe9ac1882e8e
Parents: 3b279fe
Author: Steve Loughran 
Authored: Mon Nov 7 12:36:10 2016 +
Committer: Steve Loughran 
Committed: Mon Nov 7 12:36:25 2016 +

--
 .../hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/29c6a0be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 61ff2e7..b6302ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -237,7 +237,7 @@ public abstract class Shell {
   /** Return a command to get permission information. */
   public static String[] getGetPermissionCommand() {
 return (WINDOWS) ? new String[] { getWinUtilsPath(), "ls", "-F" }
- : new String[] { "/bin/ls", "-ld" };
+ : new String[] { "ls", "-ld" };
   }
 
   /** Return a command to set permission. */
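
The practical effect of the one-line change above: the returned argv now relies on PATH lookup instead of a hard-coded /bin/ls, which helps on systems where ls is not installed under /bin. A rough sketch of how such a command array is typically run through Hadoop's Shell helper (the class name and target path are placeholders and error handling is omitted):

    import java.io.IOException;
    import org.apache.hadoop.util.Shell;
    import org.apache.hadoop.util.Shell.ShellCommandExecutor;

    public class LsPermissionDemo {
      public static void main(String[] args) throws IOException {
        // Build "ls -ld <path>" (or the winutils equivalent on Windows).
        String[] cmd = Shell.getGetPermissionCommand();   // e.g. {"ls", "-ld"}
        String[] argv = new String[cmd.length + 1];
        System.arraycopy(cmd, 0, argv, 0, cmd.length);
        argv[cmd.length] = "/tmp";                        // placeholder target path

        ShellCommandExecutor shexec = new ShellCommandExecutor(argv);
        shexec.execute();                                 // "ls" is now resolved via PATH
        System.out.print(shexec.getOutput());             // e.g. "drwxrwxrwt ... /tmp"
      }
    }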





hadoop git commit: HADOOP-13797 Remove hardcoded absolute path for ls. Contributed by Christine Koppelt

2016-11-07 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk b970446b2 -> f76895573


HADOOP-13797 Remove hardcoded absolute path for ls. Contributed by Christine 
Koppelt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7689557
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7689557
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7689557

Branch: refs/heads/trunk
Commit: f76895573d0166b4b582ff69c3f9c159ab14661f
Parents: b970446
Author: Steve Loughran 
Authored: Mon Nov 7 12:36:10 2016 +
Committer: Steve Loughran 
Committed: Mon Nov 7 12:36:10 2016 +

--
 .../hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7689557/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 0745057..5fc9869 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -237,7 +237,7 @@ public abstract class Shell {
   /** Return a command to get permission information. */
   public static String[] getGetPermissionCommand() {
 return (WINDOWS) ? new String[] { getWinUtilsPath(), "ls", "-F" }
- : new String[] { "/bin/ls", "-ld" };
+ : new String[] { "ls", "-ld" };
   }
 
   /** Return a command to set permission. */





hadoop git commit: HADOOP-13797 Remove hardcoded absolute path for ls. Contributed by Christine Koppelt

2016-11-07 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 21f210bad -> 5231c527a


HADOOP-13797 Remove hardcoded absolute path for ls. Contributed by Christine 
Koppelt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5231c527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5231c527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5231c527

Branch: refs/heads/branch-2.8
Commit: 5231c527aaf19fb3f4bd59dcd2ab19bfb906d377
Parents: 21f210b
Author: Steve Loughran 
Authored: Mon Nov 7 12:36:10 2016 +
Committer: Steve Loughran 
Committed: Mon Nov 7 12:36:38 2016 +

--
 .../hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5231c527/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index 8034e2a..c6f5d4d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -237,7 +237,7 @@ public abstract class Shell {
   /** Return a command to get permission information. */
   public static String[] getGetPermissionCommand() {
 return (WINDOWS) ? new String[] { getWinUtilsPath(), "ls", "-F" }
- : new String[] { "/bin/ls", "-ld" };
+ : new String[] { "ls", "-ld" };
   }
 
   /** Return a command to set permission. */





hadoop git commit: HADOOP-13798. TestHadoopArchives times out.

2016-11-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f30d33876 -> 3b279fe42


HADOOP-13798. TestHadoopArchives times out.

(cherry picked from commit b970446b2c59f8897bb2c3a562fa192ed3452db5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b279fe4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b279fe4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b279fe4

Branch: refs/heads/branch-2
Commit: 3b279fe4260d99f95787ea5cce428f9ef337a3cb
Parents: f30d338
Author: Akira Ajisaka 
Authored: Mon Nov 7 19:53:43 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Nov 7 19:54:50 2016 +0900

--
 .../src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b279fe4/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
index 165c515..e9ecf04 100644
--- 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
@@ -444,7 +444,7 @@ public class TestHadoopArchives {
   int read; 
   while (true) {
 read = fsdis.read(buffer, readIntoBuffer, buffer.length - 
readIntoBuffer);
-if (read < 0) {
+if (read <= 0) {
   // end of stream:
   if (readIntoBuffer > 0) {
 baos.write(buffer, 0, readIntoBuffer);
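
For context on why the comparison matters: InputStream.read(buf, off, len) is specified to return 0 when len is 0, and some streams return 0 in other situations as well, so a loop that only stops on a negative return can spin forever once its scratch buffer is full, which is consistent with the test timing out. A standalone sketch of the corrected pattern, not the test's exact loop (names and buffer size are illustrative):

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Arrays;

    // Reads at most max bytes; stops on any non-positive return from read().
    static byte[] readAtMost(InputStream in, int max) throws IOException {
      byte[] buffer = new byte[max];
      int readIntoBuffer = 0;
      while (true) {
        int read = in.read(buffer, readIntoBuffer, buffer.length - readIntoBuffer);
        if (read <= 0) {   // with "read < 0" this loops forever once the buffer is full
          break;
        }
        readIntoBuffer += read;
      }
      return Arrays.copyOf(buffer, readIntoBuffer);
    }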





hadoop git commit: HADOOP-13798. TestHadoopArchives times out.

2016-11-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 ee5dd2c38 -> 21f210bad


HADOOP-13798. TestHadoopArchives times out.

(cherry picked from commit b970446b2c59f8897bb2c3a562fa192ed3452db5)
(cherry picked from commit 3b279fe4260d99f95787ea5cce428f9ef337a3cb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21f210ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21f210ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21f210ba

Branch: refs/heads/branch-2.8
Commit: 21f210bada414207665780ff8f203c72d41cc0f5
Parents: ee5dd2c
Author: Akira Ajisaka 
Authored: Mon Nov 7 19:53:43 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Nov 7 19:55:10 2016 +0900

--
 .../src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21f210ba/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
index 165c515..e9ecf04 100644
--- 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
@@ -444,7 +444,7 @@ public class TestHadoopArchives {
   int read; 
   while (true) {
 read = fsdis.read(buffer, readIntoBuffer, buffer.length - 
readIntoBuffer);
-if (read < 0) {
+if (read <= 0) {
   // end of stream:
   if (readIntoBuffer > 0) {
 baos.write(buffer, 0, readIntoBuffer);





hadoop git commit: HADOOP-13798. TestHadoopArchives times out.

2016-11-07 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk ca33bdd5c -> b970446b2


HADOOP-13798. TestHadoopArchives times out.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b970446b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b970446b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b970446b

Branch: refs/heads/trunk
Commit: b970446b2c59f8897bb2c3a562fa192ed3452db5
Parents: ca33bdd
Author: Akira Ajisaka 
Authored: Mon Nov 7 19:53:43 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Nov 7 19:53:43 2016 +0900

--
 .../src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b970446b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
index 165c515..e9ecf04 100644
--- 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
@@ -444,7 +444,7 @@ public class TestHadoopArchives {
   int read; 
   while (true) {
 read = fsdis.read(buffer, readIntoBuffer, buffer.length - 
readIntoBuffer);
-if (read < 0) {
+if (read <= 0) {
   // end of stream:
   if (readIntoBuffer > 0) {
 baos.write(buffer, 0, readIntoBuffer);

