hadoop git commit: YARN-5920. Fix deadlock in TestRMHA.testTransitionedToStandbyShouldNotHang. Contributed by Varun Saxena.

2016-11-23 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 87bb3c51d -> 6b47a7fb8


YARN-5920. Fix deadlock in TestRMHA.testTransitionedToStandbyShouldNotHang. 
Contributed by Varun Saxena.

(cherry picked from commit e15c20edba1e9a23475ee6a4dfbadbdb8c1f668a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b47a7fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b47a7fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b47a7fb

Branch: refs/heads/branch-2
Commit: 6b47a7fb8eaf0659c4ca478d72f4019912942e5d
Parents: 87bb3c5
Author: Rohith Sharma K S 
Authored: Thu Nov 24 12:18:38 2016 +0530
Committer: Rohith Sharma K S 
Committed: Thu Nov 24 12:19:19 2016 +0530

--
 .../org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java   | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b47a7fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index 905a42c..df1cdb5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -463,8 +463,7 @@ public class TestRMHA {
 
 MemoryRMStateStore memStore = new MemoryRMStateStore() {
   @Override
-  public synchronized void updateApplicationState(
-  ApplicationStateData appState) {
+  public void updateApplicationState(ApplicationStateData appState) {
 notifyStoreOperationFailed(new StoreFencedException());
   }
 };
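
The one-line change above matters because the hang in this test was a lock-ordering cycle: the store's synchronized update method invoked the failure callback, which drives the transition to standby, while the standby transition in turn needs the store's monitor to stop the store. A minimal, self-contained illustration of that shape (the class and method names below are invented for the sketch and are not the actual RM or state-store code):

    // Generic AB-BA lock-ordering deadlock, the pattern being avoided above.
    public class LockOrderDeadlockSketch {
      private final Object storeLock = new Object();
      private final Object transitionLock = new Object();

      // Thread A: store path holds storeLock, then needs transitionLock.
      void updateState() {
        synchronized (storeLock) {
          notifyFenced();                  // eventually needs transitionLock
        }
      }

      void notifyFenced() {
        synchronized (transitionLock) { /* transition to standby */ }
      }

      // Thread B: transition path holds transitionLock, then needs storeLock.
      void transitionToStandby() {
        synchronized (transitionLock) {
          synchronized (storeLock) { /* stop the store */ }
        }
      }
    }

Dropping synchronized from the test's stub store means the failure callback no longer runs while the store's monitor is held, so the cycle cannot form.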





hadoop git commit: YARN-5920. Fix deadlock in TestRMHA.testTransitionedToStandbyShouldNotHang. Contributed by Varun Saxena.

2016-11-23 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 10468529a -> e15c20edb


YARN-5920. Fix deadlock in TestRMHA.testTransitionedToStandbyShouldNotHang. 
Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e15c20ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e15c20ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e15c20ed

Branch: refs/heads/trunk
Commit: e15c20edba1e9a23475ee6a4dfbadbdb8c1f668a
Parents: 1046852
Author: Rohith Sharma K S 
Authored: Thu Nov 24 12:18:38 2016 +0530
Committer: Rohith Sharma K S 
Committed: Thu Nov 24 12:18:38 2016 +0530

--
 .../org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java   | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e15c20ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
index 47c053c..000f4a4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java
@@ -465,8 +465,7 @@ public class TestRMHA {
 
 MemoryRMStateStore memStore = new MemoryRMStateStore() {
   @Override
-  public synchronized void updateApplicationState(
-  ApplicationStateData appState) {
+  public void updateApplicationState(ApplicationStateData appState) {
 notifyStoreOperationFailed(new StoreFencedException());
   }
 };





[3/3] hadoop git commit: YARN-4752. Improved preemption in FairScheduler. (kasha)

2016-11-23 Thread kasha
YARN-4752. Improved preemption in FairScheduler. (kasha)

Contains:
YARN-5605. Preempt containers (all on one node) to meet the requirement of 
starved applications
YARN-5821. Drop left-over preemption-related code and clean up method 
visibilities in the Schedulable hierarchy
YARN-5783. Verify identification of starved applications.
YARN-5819. Verify fairshare and minshare preemption
YARN-5885. Cleanup YARN-4752 branch for merge

Change-Id: Iee0962377d019dd64dc69a020725d2eaf360858c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10468529
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10468529
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10468529

Branch: refs/heads/trunk
Commit: 10468529a9b858bd945e7ecb063c9c1438efa474
Parents: c7a5f29
Author: Daniel Templeton 
Authored: Thu Sep 22 14:08:15 2016 -0700
Committer: Karthik Kambatla 
Committed: Wed Nov 23 19:48:59 2016 -1000

--
 .../hadoop/yarn/util/resource/Resources.java|4 +
 .../scheduler/AppSchedulingInfo.java|   17 +
 .../scheduler/SchedulerApplicationAttempt.java  |   16 +
 .../scheduler/common/fica/FiCaSchedulerApp.java |   16 +
 .../scheduler/fair/FSAppAttempt.java|  242 ++-
 .../scheduler/fair/FSContext.java   |   54 +
 .../scheduler/fair/FSLeafQueue.java |  283 ++--
 .../scheduler/fair/FSParentQueue.java   |   52 +-
 .../scheduler/fair/FSPreemptionThread.java  |  188 +++
 .../resourcemanager/scheduler/fair/FSQueue.java |   30 +-
 .../scheduler/fair/FSSchedulerNode.java |   36 +-
 .../scheduler/fair/FSStarvedApps.java   |   85 +
 .../scheduler/fair/FairScheduler.java   |  269 +--
 .../scheduler/fair/Schedulable.java |   29 +-
 .../scheduler/fair/FairSchedulerTestBase.java   |   39 +-
 .../fair/FairSchedulerWithMockPreemption.java   |   58 +
 .../scheduler/fair/FakeSchedulable.java |5 -
 .../scheduler/fair/TestFSAppStarvation.java |  256 +++
 .../scheduler/fair/TestFSLeafQueue.java |  165 +-
 .../fair/TestFairSchedulerPreemption.java   | 1540 ++
 .../fair/TestQueueManagerRealScheduler.java |  128 ++
 .../scheduler/fair/TestSchedulingPolicy.java|5 -
 22 files changed, 1444 insertions(+), 2073 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10468529/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
index 760b0ea..462e02a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java
@@ -143,6 +143,10 @@ public class Resources {
   public static Resource none() {
 return NONE;
   }
+
+  public static boolean isNone(Resource other) {
+return NONE.equals(other);
+  }
   
   public static Resource unbounded() {
 return UNBOUNDED;
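
A short usage sketch for the new helper; the method and variable names here are hypothetical, only Resources.isNone, Resources.none and Resources.subtract are real:

    // Hypothetical caller: preempt only when an app is actually starved.
    static boolean needsPreemption(Resource demand, Resource allocated) {
      Resource starvation = Resources.subtract(demand, allocated);
      // Reads more directly than Resources.none().equals(starvation).
      return !Resources.isNone(starvation);
    }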

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10468529/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 80811b1..feb20ee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -624,6 +624,23 @@ public class AppSchedulingInfo {
   }
 
   /**
+   * Method to return the next resource request to be serviced.
+   *
+   * In the initial implementation, we just pick any {@link ResourceRequest}
+   * corresponding to the highest priority.
+   *
+   * @return next {@link ResourceRequest} to allocate resources 

[1/3] hadoop git commit: YARN-4752. Improved preemption in FairScheduler. (kasha)

2016-11-23 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk c7a5f2906 -> 10468529a


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10468529/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
index 2cbe507..36ee685 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
@@ -17,1467 +17,259 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.Container;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.event.AsyncDispatcher;
-import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
-
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerRequestKey;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
-.TestUtils;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerPreemptEvent;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType;
-import org.apache.hadoop.yarn.util.ControlledClock;
-import org.apache.hadoop.yarn.util.resource.Resources;
-
 import org.junit.After;
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import java.util.Arrays;
+import java.util.Collection;
 
+/**
+ * Tests to verify fairshare and minshare preemption, using parameterization.
+ */
+@RunWith(Parameterized.class)
 public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
-  private final static String ALLOC_FILE = new File(TEST_DIR,
-  TestFairSchedulerPreemption.class.getName() + ".xml").getAbsolutePath();
+  private static final File ALLOC_FILE = new File(TEST_DIR, "test-queues");
 
-  private ControlledClock clock;
+  // Node Capacity = NODE_CAPACITY_MULTIPLE * (1 GB or 1 vcore)
+  private static final int NODE_CAPACITY_MULTIPLE = 4;
 
-  private static class StubbedFairScheduler extends FairScheduler {
-public long lastPreemptMemory = -1;
+  private final boolean fairsharePreemption;
 
-@Override
-protected void preemptResources(Resource toPreempt) {
-  lastPreemptMemory = toPreempt.getMemorySize();
-}
+  // App that takes up the entire cluster
+  private FSAppAttempt greedyApp;
 
-public void resetLastPreemptResources() {
-  lastPreemptMemory = -1;
-}
+  // Starving app that is expected to instigate preemption
+  private FSAppAttempt starvingApp;
+
+  @Parameterized.Parameters
+  public static Collection<Boolean[]> getParameters() {
+return Arrays.asList(new Boolean[][] {
+{true}, {false}});
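
The hunk above is cut off in this digest, but the setup follows the stock JUnit 4 Parameterized pattern: each Boolean[] row produces one independent run of every test method, here once with fairshare preemption and once with minshare preemption. A generic, self-contained sketch of the same idea (class and queue names are made up):

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import static org.junit.Assert.assertTrue;

    @RunWith(Parameterized.class)
    public class PreemptionModeSketch {
      @Parameterized.Parameters
      public static Collection<Boolean[]> getParameters() {
        return Arrays.asList(new Boolean[][] {{true}, {false}});
      }

      private final boolean fairsharePreemption;

      // The runner passes each parameter row to the constructor.
      public PreemptionModeSketch(boolean fairsharePreemption) {
        this.fairsharePreemption = fairsharePreemption;
      }

      @Test
      public void selectsQueueForMode() {
        String queue = fairsharePreemption ? "root.fairshare" : "root.minshare";
        assertTrue(queue.startsWith("root."));
      }
    }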
   

[2/3] hadoop git commit: YARN-4752. Improved preemption in FairScheduler. (kasha)

2016-11-23 Thread kasha
http://git-wip-us.apache.org/repos/asf/hadoop/blob/10468529/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index 8e6272a..992b75d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -17,14 +17,6 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import org.junit.Assert;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -39,7 +31,9 @@ import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
@@ -50,9 +44,17 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptM
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
+import org.junit.Assert;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
 public class FairSchedulerTestBase {
   public final static String TEST_DIR =
   new File(System.getProperty("test.build.data", 
"/tmp")).getAbsolutePath();
@@ -70,9 +72,14 @@ public class FairSchedulerTestBase {
   private static final int SLEEP_DURATION = 10;
   private static final int SLEEP_RETRIES = 1000;
 
+  /**
+   * The list of nodes added to the cluster using the {@link #addNode} method.
+   */
+  protected final List<RMNode> rmNodes = new ArrayList<>();
+
   // Helper methods
   public Configuration createConfiguration() {
-Configuration conf = new YarnConfiguration();
+conf = new YarnConfiguration();
 conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
 ResourceScheduler.class);
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
@@ -280,4 +287,18 @@ public class FairSchedulerTestBase {
 Assert.assertEquals(resource.getVirtualCores(),
 app.getCurrentConsumption().getVirtualCores());
   }
+
+  /**
+   * Add a node to the cluster and track the nodes in {@link #rmNodes}.
+   * @param memory memory capacity of the node
+   * @param cores cpu capacity of the node
+   */
+  protected void addNode(int memory, int cores) {
+int id = rmNodes.size() + 1;
+RMNode node =
+MockNodes.newNodeInfo(1, Resources.createResource(memory, cores), id,
+"127.0.0." + id);
+scheduler.handle(new NodeAddedSchedulerEvent(node));
+rmNodes.add(node);
+  }
 }
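
A sketch of how a test built on this base class might use the new helper; the test class is hypothetical and assumes the scheduler field has been initialised (for example in a @Before method), as the real subclasses do:

    import org.junit.Assert;
    import org.junit.Test;

    public class ExampleClusterSketch extends FairSchedulerTestBase {
      @Test
      public void testTwoNodeCluster() {
        addNode(4 * 1024, 4);                  // 4 GB, 4 vcores
        addNode(4 * 1024, 4);
        // Every node added through the helper is tracked in rmNodes.
        Assert.assertEquals(2, rmNodes.size());
      }
    }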

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10468529/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerWithMockPreemption.java
--
diff --git 

hadoop git commit: Reverting for fixing compilation errors in branch-2. Revert "YARN-5649. Add REST endpoints for updating application timeouts. Contributed by Rohith Sharma K S"

2016-11-23 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 57b17ae80 -> 87bb3c51d


Reverting for fixing compilation errors in branch-2.
Revert "YARN-5649. Add REST endpoints for updating application timeouts. 
Contributed by Rohith Sharma K S"

This reverts commit 43796580421346f254d066c0c58ae13937cdbd45.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87bb3c51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87bb3c51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87bb3c51

Branch: refs/heads/branch-2
Commit: 87bb3c51d6225486fbe2006b77121b1058a45385
Parents: 57b17ae
Author: Rohith Sharma K S 
Authored: Thu Nov 24 09:18:07 2016 +0530
Committer: Rohith Sharma K S 
Committed: Thu Nov 24 09:18:07 2016 +0530

--
 .../server/resourcemanager/ClientRMService.java |   2 +-
 .../server/resourcemanager/RMAuditLogger.java   |   1 -
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../webapp/JAXBContextResolver.java |   3 +-
 .../resourcemanager/webapp/RMWebServices.java   | 186 ---
 .../resourcemanager/webapp/dao/AppInfo.java |  24 ---
 .../webapp/dao/AppTimeoutInfo.java  |  71 ---
 .../webapp/dao/AppTimeoutsInfo.java |  47 -
 .../server/resourcemanager/rmapp/MockRMApp.java |   3 +-
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 .../TestRMWebServicesAppsModification.java  | 137 +-
 11 files changed, 6 insertions(+), 472 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bb3c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 8a0f373..21a9d13 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1734,7 +1734,7 @@ public class ClientRMService extends AbstractService 
implements
   RMAuditLogger.logFailure(callerUGI.getShortUserName(),
   AuditConstants.UPDATE_APP_TIMEOUTS, "UNKNOWN", "ClientRMService",
   ex.getMessage());
-  throw ex;
+  throw RPCUtil.getRemoteException(ex);
 }
 
 RMAuditLogger.logSuccess(callerUGI.getShortUserName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bb3c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
index 051d979..d52e002 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
@@ -66,7 +66,6 @@ public class RMAuditLogger {
 "Update Application Priority";
 public static final String UPDATE_APP_TIMEOUTS =
 "Update Application Timeouts";
-public static final String GET_APP_TIMEOUTS = "Get Application Timeouts";
 public static final String CHANGE_CONTAINER_RESOURCE =
 "AM Changed Container Resource";
 public static final String SIGNAL_CONTAINER = "Signal Container Request";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bb3c51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 

[1/3] hadoop git commit: HADOOP-13605. Clean up FileSystem javadocs, logging; improve diagnostics on FS load. Contributed by Steve Loughran

2016-11-23 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 437965804 -> 57b17ae80


http://git-wip-us.apache.org/repos/asf/hadoop/blob/57b17ae8/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index dd788f3..b87ee4c 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -416,7 +416,7 @@ If the filesystem is not location aware, it SHOULD return
 BlockLocation(["localhost:50010"] ,
   ["localhost"],
   ["/default/localhost"]
-   0, F.getLen())
+   0, f.getLen())
] ;
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/57b17ae8/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
index 07b07dc..214f070 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
@@ -95,16 +95,14 @@ public class TestFileSystemCaching {
 try {
   fs = FileSystem.get(URI.create("//host"), conf);
   fail("got fs with auth but no scheme");
-} catch (Exception e) {
-  assertEquals("No FileSystem for scheme: null", e.getMessage());
+} catch (UnsupportedFileSystemException e) {
 }
 
 // no scheme, different auth
 try {
   fs = FileSystem.get(URI.create("//host2"), conf);
   fail("got fs with auth but no scheme");
-} catch (Exception e) {
-  assertEquals("No FileSystem for scheme: null", e.getMessage());
+} catch (UnsupportedFileSystemException e) {
 }
   }
   





[2/3] hadoop git commit: HADOOP-13605. Clean up FileSystem javadocs, logging; improve diagnostics on FS load. Contributed by Steve Loughran

2016-11-23 Thread liuml07
http://git-wip-us.apache.org/repos/asf/hadoop/blob/57b17ae8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 63bd38a..ab7cd6a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -74,6 +74,8 @@ import org.apache.htrace.core.TraceScope;
 
 import com.google.common.base.Preconditions;
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
@@ -87,79 +89,117 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
  * 
  *
  * All user code that may potentially use the Hadoop Distributed
- * File System should be written to use a FileSystem object.  The
- * Hadoop DFS is a multi-machine system that appears as a single
- * disk.  It's useful because of its fault tolerance and potentially
- * very large capacity.
- * 
+ * File System should be written to use a FileSystem object or its
+ * successor, {@link FileContext}.
+ *
  * 
  * The local implementation is {@link LocalFileSystem} and distributed
- * implementation is DistributedFileSystem.
+ * implementation is DistributedFileSystem. There are other implementations
+ * for object stores and (outside the Apache Hadoop codebase),
+ * third party filesystems.
+ * 
+ * Notes
+ * 
+ * The behaviour of the filesystem is
+ * <a href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/filesystem/filesystem.html">
+ * specified in the Hadoop documentation. </a>
+ * However, the normative specification of the behavior of this class is
+ * actually HDFS: if HDFS does not behave the way these Javadocs or
+ * the specification in the Hadoop documentations define, assume that
+ * the documentation is incorrect.
+ * 
+ * The term {@code FileSystem} refers to an instance of this class.
+ * The acronym "FS" is used as an abbreviation of FileSystem.
+ * The term {@code filesystem} refers to the distributed/local filesystem
+ * itself, rather than the class used to interact with it.
+ * The term "file" refers to a file in the remote filesystem,
+ * rather than instances of {@code java.io.File}.
+ * 
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public abstract class FileSystem extends Configured implements Closeable {
-  public static final String FS_DEFAULT_NAME_KEY = 
+  public static final String FS_DEFAULT_NAME_KEY =
CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
-  public static final String DEFAULT_FS = 
+  public static final String DEFAULT_FS =
CommonConfigurationKeys.FS_DEFAULT_NAME_DEFAULT;
 
+  /**
+   * This log is widely used in the org.apache.hadoop.fs code and tests,
+   * so must be considered something to only be changed with care.
+   */
+  @InterfaceAudience.Private
   public static final Log LOG = LogFactory.getLog(FileSystem.class);
 
   /**
-   * Priority of the FileSystem shutdown hook.
+   * The SLF4J logger to use in logging within the FileSystem class itself.
+   */
+  private static final Logger LOGGER =
+  LoggerFactory.getLogger(FileSystem.class);
+
+  /**
+   * Priority of the FileSystem shutdown hook: {@value}.
*/
   public static final int SHUTDOWN_HOOK_PRIORITY = 10;
 
+  /**
+   * Prefix for trash directory: {@value}.
+   */
   public static final String TRASH_PREFIX = ".Trash";
   public static final String USER_HOME_PREFIX = "/user";
 
-  /** FileSystem cache */
+  /** FileSystem cache. */
   static final Cache CACHE = new Cache();
 
   /** The key this instance is stored under in the cache. */
   private Cache.Key key;
 
-  /** Recording statistics per a FileSystem class */
-  private static final Map<Class<? extends FileSystem>, Statistics>
-statisticsTable =
-  new IdentityHashMap<Class<? extends FileSystem>, Statistics>();
-  
+  /** Recording statistics per a FileSystem class. */
+  private static final Map<Class<? extends FileSystem>, Statistics>
+  statisticsTable = new IdentityHashMap<>();
+
   /**
* The statistics for this file system.
*/
   protected Statistics statistics;
 
   /**
-   * A cache of files that should be deleted when filsystem is closed
+   * A cache of files that should be deleted when the FileSystem is closed
* or the JVM is exited.
*/
-  private Set 

[3/3] hadoop git commit: HADOOP-13605. Clean up FileSystem javadocs, logging; improve diagnostics on FS load. Contributed by Steve Loughran

2016-11-23 Thread liuml07
HADOOP-13605. Clean up FileSystem javadocs, logging; improve diagnostics on FS 
load. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/57b17ae8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/57b17ae8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/57b17ae8

Branch: refs/heads/branch-2
Commit: 57b17ae8002b14cefd8c84eece1231f9c67606e2
Parents: 4379658
Author: Mingliang Liu 
Authored: Wed Nov 23 16:45:03 2016 -0800
Committer: Mingliang Liu 
Committed: Wed Nov 23 17:15:55 2016 -0800

--
 .../java/org/apache/hadoop/fs/FileSystem.java   | 1458 +++---
 .../src/site/markdown/filesystem/filesystem.md  |2 +-
 .../apache/hadoop/fs/TestFileSystemCaching.java |6 +-
 3 files changed, 908 insertions(+), 558 deletions(-)
--






hadoop git commit: HADOOP-13766. Fix a typo in the comments of RPC.getProtocolVersion. Contributed by Ethan Li.

2016-11-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 860d49aa6 -> c7a5f2906


HADOOP-13766. Fix a typo in the comments of RPC.getProtocolVersion. Contributed 
by Ethan Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7a5f290
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7a5f290
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7a5f290

Branch: refs/heads/trunk
Commit: c7a5f2906fcfd073a402b6981b091bd6d9b80294
Parents: 860d49a
Author: Andrew Wang 
Authored: Wed Nov 23 17:08:38 2016 -0800
Committer: Andrew Wang 
Committed: Wed Nov 23 17:08:38 2016 -0800

--
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7a5f290/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index a62748e..e16a8f5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -158,8 +158,9 @@ public class RPC {
   
   /**
* Get the protocol version from protocol class.
-   * If the protocol class has a ProtocolAnnotation, then get the protocol
-   * name from the annotation; otherwise the class name is the protocol name.
+   * If the protocol class has a ProtocolAnnotation,
+   * then get the protocol version from the annotation;
+   * otherwise get it from the versionID field of the protocol class.
*/
  static public long getProtocolVersion(Class<?> protocol) {
 if (protocol == null) {
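
To make the two sources of the version concrete, here is a hypothetical protocol interface showing both the annotation and the versionID fallback described in the corrected comment (the interface and its values are invented; only the ProtocolInfo annotation is real):

    import org.apache.hadoop.ipc.ProtocolInfo;

    // With the annotation present, getProtocolVersion() reads 2L from it;
    // without the annotation it falls back to the versionID field.
    @ProtocolInfo(protocolName = "example.PingProtocol", protocolVersion = 2L)
    public interface PingProtocol {
      long versionID = 2L;
      boolean ping();
    }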





[2/3] hadoop git commit: HADOOP-13605. Clean up FileSystem javadocs, logging; improve diagnostics on FS load. Contributed by Steve Loughran

2016-11-23 Thread liuml07
http://git-wip-us.apache.org/repos/asf/hadoop/blob/860d49aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 9e98455..f581f61 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -75,6 +75,8 @@ import org.apache.htrace.core.TraceScope;
 
 import com.google.common.base.Preconditions;
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
@@ -88,79 +90,118 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
  * 
  *
  * All user code that may potentially use the Hadoop Distributed
- * File System should be written to use a FileSystem object.  The
- * Hadoop DFS is a multi-machine system that appears as a single
- * disk.  It's useful because of its fault tolerance and potentially
- * very large capacity.
- * 
+ * File System should be written to use a FileSystem object or its
+ * successor, {@link FileContext}.
+ *
  * 
  * The local implementation is {@link LocalFileSystem} and distributed
- * implementation is DistributedFileSystem.
+ * implementation is DistributedFileSystem. There are other implementations
+ * for object stores and (outside the Apache Hadoop codebase),
+ * third party filesystems.
+ * 
+ * Notes
+ * 
+ * The behaviour of the filesystem is
+ * <a href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/filesystem/filesystem.html">
+ * specified in the Hadoop documentation. </a>
+ * However, the normative specification of the behavior of this class is
+ * actually HDFS: if HDFS does not behave the way these Javadocs or
+ * the specification in the Hadoop documentations define, assume that
+ * the documentation is incorrect.
+ * 
+ * The term {@code FileSystem} refers to an instance of this class.
+ * The acronym "FS" is used as an abbreviation of FileSystem.
+ * The term {@code filesystem} refers to the distributed/local filesystem
+ * itself, rather than the class used to interact with it.
+ * The term "file" refers to a file in the remote filesystem,
+ * rather than instances of {@code java.io.File}.
+ * 
  */
+@SuppressWarnings("DeprecatedIsStillUsed")
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public abstract class FileSystem extends Configured implements Closeable {
-  public static final String FS_DEFAULT_NAME_KEY = 
+  public static final String FS_DEFAULT_NAME_KEY =
CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
-  public static final String DEFAULT_FS = 
+  public static final String DEFAULT_FS =
CommonConfigurationKeys.FS_DEFAULT_NAME_DEFAULT;
 
+  /**
+   * This log is widely used in the org.apache.hadoop.fs code and tests,
+   * so must be considered something to only be changed with care.
+   */
+  @InterfaceAudience.Private
   public static final Log LOG = LogFactory.getLog(FileSystem.class);
 
   /**
-   * Priority of the FileSystem shutdown hook.
+   * The SLF4J logger to use in logging within the FileSystem class itself.
+   */
+  private static final Logger LOGGER =
+  LoggerFactory.getLogger(FileSystem.class);
+
+  /**
+   * Priority of the FileSystem shutdown hook: {@value}.
*/
   public static final int SHUTDOWN_HOOK_PRIORITY = 10;
 
+  /**
+   * Prefix for trash directory: {@value}.
+   */
   public static final String TRASH_PREFIX = ".Trash";
   public static final String USER_HOME_PREFIX = "/user";
 
-  /** FileSystem cache */
+  /** FileSystem cache. */
   static final Cache CACHE = new Cache();
 
   /** The key this instance is stored under in the cache. */
   private Cache.Key key;
 
-  /** Recording statistics per a FileSystem class */
-  private static final Map<Class<? extends FileSystem>, Statistics>
-statisticsTable =
-  new IdentityHashMap<Class<? extends FileSystem>, Statistics>();
-  
+  /** Recording statistics per a FileSystem class. */
+  private static final Map<Class<? extends FileSystem>, Statistics>
+  statisticsTable = new IdentityHashMap<>();
+
   /**
* The statistics for this file system.
*/
   protected Statistics statistics;
 
   /**
-   * A cache of files that should be deleted when filesystem is closed
+   * A cache of files that should be deleted when the FileSystem is closed
* or the 

[3/3] hadoop git commit: HADOOP-13605. Clean up FileSystem javadocs, logging; improve diagnostics on FS load. Contributed by Steve Loughran

2016-11-23 Thread liuml07
HADOOP-13605. Clean up FileSystem javadocs, logging; improve diagnostics on FS 
load. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/860d49aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/860d49aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/860d49aa

Branch: refs/heads/trunk
Commit: 860d49aa6ae7790d970d7f2322fed890b0e5cda2
Parents: de48949
Author: Mingliang Liu 
Authored: Wed Nov 23 16:32:42 2016 -0800
Committer: Mingliang Liu 
Committed: Wed Nov 23 16:42:27 2016 -0800

--
 .../java/org/apache/hadoop/fs/FileSystem.java   | 1449 +++---
 .../src/site/markdown/filesystem/filesystem.md  |2 +-
 .../org/apache/hadoop/fs/TestDefaultUri.java|   40 +-
 .../apache/hadoop/fs/TestFileSystemCaching.java |8 +-
 4 files changed, 920 insertions(+), 579 deletions(-)
--






[1/3] hadoop git commit: HADOOP-13605. Clean up FileSystem javadocs, logging; improve diagnostics on FS load. Contributed by Steve Loughran

2016-11-23 Thread liuml07
Repository: hadoop
Updated Branches:
  refs/heads/trunk de4894936 -> 860d49aa6


http://git-wip-us.apache.org/repos/asf/hadoop/blob/860d49aa/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
index b18b5f6..201d397 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md
@@ -419,7 +419,7 @@ If the filesystem is not location aware, it SHOULD return
 BlockLocation(["localhost:9866"] ,
   ["localhost"],
   ["/default/localhost"]
-   0, F.getLen())
+   0, f.getLen())
] ;
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/860d49aa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java
index f232735..b84d66a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java
@@ -21,14 +21,14 @@ import static 
org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertThat;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.GenericTestUtils;
+
 import org.junit.Test;
+import static org.apache.hadoop.test.LambdaTestUtils.*;
 
 /**
  * Test default URI related APIs in {@link FileSystem}.
@@ -69,15 +69,12 @@ public class TestDefaultUri {
   }
 
   @Test
-  public void tetGetDefaultUriNoSchemeTrailingSlash() {
+  public void tetGetDefaultUriNoSchemeTrailingSlash() throws Exception {
 conf.set(FS_DEFAULT_NAME_KEY, "nn_host/");
-try {
-  FileSystem.getDefaultUri(conf);
-  fail("Expect IAE: No scheme in default FS");
-} catch (IllegalArgumentException e) {
-  GenericTestUtils.assertExceptionContains(
-  "No scheme in default FS", e);
-}
+intercept(IllegalArgumentException.class,
+"No scheme in default FS",
+() -> FileSystem.getDefaultUri(conf));
+
   }
 
   @Test
@@ -88,28 +85,19 @@ public class TestDefaultUri {
   }
 
   @Test
-  public void tetFsGetNoScheme() throws IOException {
+  public void tetFsGetNoScheme() throws Exception {
 // Bare host name or address indicates hdfs scheme
 conf.set(FS_DEFAULT_NAME_KEY, "nn_host");
-try {
-  FileSystem.get(conf);
-  fail("Expect IOE: No FileSystem for scheme: hdfs");
-} catch (IOException e) {
-  GenericTestUtils.assertExceptionContains(
-  "No FileSystem for scheme: hdfs", e);
-}
+intercept(UnsupportedFileSystemException.class, "hdfs",
+() -> FileSystem.get(conf));
   }
 
   @Test
-  public void tetFsGetNoSchemeTrailingSlash() throws IOException {
+  public void tetFsGetNoSchemeTrailingSlash() throws Exception {
 // Bare host name or address with trailing slash is invalid
 conf.set(FS_DEFAULT_NAME_KEY, "nn_host/");
-try {
-  FileSystem.get(conf);
-  fail("Expect IAE: No scheme in default FS");
-} catch (IllegalArgumentException e) {
-  GenericTestUtils.assertExceptionContains(
-  "No scheme in default FS", e);
-}
+intercept(IllegalArgumentException.class,
+"No scheme in default FS",
+() -> FileSystem.get(conf));
   }
 }
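
For readers new to LambdaTestUtils.intercept(), used above: it evaluates the lambda, fails the test if no exception of the given class containing the given text is thrown, and returns the caught exception for any further assertions. A method-level sketch in the style of this test, reusing the same calls and static imports as the diff:

    @Test
    public void testNoSchemeRejected() throws Exception {
      Configuration conf = new Configuration(false);
      conf.set(FS_DEFAULT_NAME_KEY, "nn_host/");
      IllegalArgumentException e = intercept(IllegalArgumentException.class,
          "No scheme in default FS",
          () -> FileSystem.getDefaultUri(conf));
      // 'e' is the caught exception; assert on it further if needed.
    }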

http://git-wip-us.apache.org/repos/asf/hadoop/blob/860d49aa/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
index 07b07dc..69ef71e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
@@ -95,16 +95,14 @@ public class TestFileSystemCaching {
 try {
   fs = FileSystem.get(URI.create("//host"), conf);
   fail("got fs with auth but no scheme");
-} catch (Exception e) {
-  assertEquals("No FileSystem for scheme: null", e.getMessage());
+ 

[1/2] hadoop git commit: HDFS-11368. Erasure Coding: Deprecate replication-related config keys. Contributed by Rakesh R.

2016-11-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk dd98a8005 -> de4894936


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de489493/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 4fc5029..dc7f47a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -142,8 +142,9 @@ public class TestPipelinesFailover {
   MethodToTestIdempotence methodToTest) throws Exception {
 Configuration conf = new Configuration();
 conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-// Don't check replication periodically.
-conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
+// Don't check low redundancy periodically.
+conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+1000);
 
 FSDataOutputStream stm = null;
 MiniDFSCluster cluster = newMiniCluster(conf, 3);
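
The renaming pattern is the same throughout the patch: the redundancy key replaces the replication key, and the old spelling keeps working through the deprecation table (see the DeprecatedProperties.md hunk in the [2/2] message below). A small sketch, with an arbitrary interval value; conf stands for any Configuration, as in the tests above:

    // Preferred new-style key:
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    // Old-style key, still accepted but reported as deprecated:
    conf.setInt("dfs.namenode.replication.interval", 1);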

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de489493/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
index 110615d..c11f880 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
@@ -45,7 +45,7 @@ public class TestNNMetricFilesInGetListingOps {
 CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
 CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
 CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
   }
  
   private MiniDFSCluster cluster;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de489493/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 90d61ee..0ad6130 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -78,7 +78,7 @@ import org.junit.Test;
  */
 public class TestNameNodeMetrics {
   private static final Configuration CONF = new HdfsConfiguration();
-  private static final int DFS_REPLICATION_INTERVAL = 1;
+  private static final int DFS_REDUNDANCY_INTERVAL = 1;
   private static final Path TEST_ROOT_DIR_PATH = 
 new Path("/testNameNodeMetrics");
   private static final String NN_METRICS = "NameNodeActivity";
@@ -96,9 +96,9 @@ public class TestNameNodeMetrics {
 CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
 CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
 CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
-DFS_REPLICATION_INTERVAL);
-CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
-DFS_REPLICATION_INTERVAL);
+DFS_REDUNDANCY_INTERVAL);
+CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+DFS_REDUNDANCY_INTERVAL);
 CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, 
 "" + PERCENTILES_INTERVAL);
 // Enable stale DataNodes checking
@@ -333,7 +333,7 @@ public class TestNameNodeMetrics {
   private void waitForDeletion() throws InterruptedException {
 // Wait for more than DATANODE_COUNT replication intervals to ensure all
 // the blocks pending deletion are sent for deletion to the datanodes.
-Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
+

[2/2] hadoop git commit: HDFS-11368. Erasure Coding: Deprecate replication-related config keys. Contributed by Rakesh R.

2016-11-23 Thread wang
HDFS-11368. Erasure Coding: Deprecate replication-related config keys. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de489493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de489493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de489493

Branch: refs/heads/trunk
Commit: de4894936a5b581572f35fa5b8979d9f23da0891
Parents: dd98a80
Author: Andrew Wang 
Authored: Wed Nov 23 16:42:06 2016 -0800
Committer: Andrew Wang 
Committed: Wed Nov 23 16:42:06 2016 -0800

--
 .../src/site/markdown/DeprecatedProperties.md   |  7 ++-
 .../apache/hadoop/hdfs/HdfsConfiguration.java   | 10 +++-
 .../hdfs/client/HdfsClientConfigKeys.java   | 10 ++--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 19 +++---
 .../hadoop/hdfs/server/balancer/Balancer.java   |  4 +-
 .../server/blockmanagement/BlockManager.java| 62 
 .../BlockPlacementPolicyDefault.java|  8 +--
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  4 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java |  4 +-
 .../src/main/resources/hdfs-default.xml |  8 +--
 .../apache/hadoop/hdfs/AdminStatesBaseTest.java |  4 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java |  2 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  2 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  2 +-
 .../hdfs/TestDecommissionWithStriped.java   | 10 ++--
 .../apache/hadoop/hdfs/TestDeprecatedKeys.java  | 22 ++-
 .../hadoop/hdfs/TestEncryptedTransfer.java  |  2 +-
 .../org/apache/hadoop/hdfs/TestFileAppend4.java |  2 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java|  2 +-
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java   |  2 +-
 .../hadoop/hdfs/TestMissingBlocksAlert.java |  3 +-
 .../hdfs/TestReadStripedFileWithDecoding.java   |  3 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |  4 +-
 .../hdfs/TestReplaceDatanodeOnFailure.java  |  3 +-
 .../org/apache/hadoop/hdfs/TestReplication.java |  3 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java   |  2 +-
 .../hdfs/server/balancer/TestBalancer.java  | 26 
 .../blockmanagement/BlockManagerTestUtil.java   | 21 ---
 .../TestBlocksWithNotEnoughRacks.java   |  4 +-
 .../server/blockmanagement/TestNodeCount.java   |  2 +-
 .../TestPendingInvalidateBlock.java |  2 +-
 .../TestPendingReconstruction.java  |  2 +-
 ...constructStripedBlocksWithRackAwareness.java |  8 +--
 .../blockmanagement/TestReplicationPolicy.java  |  2 +-
 .../TestReplicationPolicyConsiderLoad.java  |  2 +-
 .../TestDataNodeErasureCodingMetrics.java   |  2 +-
 .../fsdataset/impl/TestLazyPersistFiles.java| 12 ++--
 .../hadoop/hdfs/server/mover/TestMover.java |  9 ++-
 .../hdfs/server/mover/TestStorageMover.java |  4 +-
 .../server/namenode/NNThroughputBenchmark.java  | 10 ++--
 .../TestAddOverReplicatedStripedBlocks.java |  2 +-
 .../namenode/TestDecommissioningStatus.java |  5 +-
 .../server/namenode/TestFSEditLogLoader.java|  2 +-
 .../hdfs/server/namenode/TestHostsFiles.java|  4 +-
 .../hdfs/server/namenode/TestMetaSave.java  |  5 +-
 .../namenode/TestReconstructStripedBlocks.java  |  9 +--
 .../server/namenode/TestStripedINodeFile.java   |  5 +-
 .../hdfs/server/namenode/ha/TestDNFencing.java  | 11 ++--
 .../ha/TestDNFencingWithReplication.java|  2 +-
 .../namenode/ha/TestPipelinesFailover.java  |  5 +-
 .../TestNNMetricFilesInGetListingOps.java   |  2 +-
 .../namenode/metrics/TestNameNodeMetrics.java   | 12 ++--
 52 files changed, 216 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de489493/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
index 8de1be2..38a1954 100644
--- 
a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
@@ -43,8 +43,11 @@ The following table lists the configuration property names 
that are deprecated i
 | dfs.permissions | dfs.permissions.enabled |
 | dfs.permissions.supergroup | dfs.permissions.superusergroup |
 | dfs.read.prefetch.size | dfs.client.read.prefetch.size |
-| dfs.replication.considerLoad | dfs.namenode.replication.considerLoad |
-| dfs.replication.interval | dfs.namenode.replication.interval |
+| dfs.replication.considerLoad | dfs.namenode.redundancy.considerLoad |
+| dfs.namenode.replication.considerLoad | dfs.namenode.redundancy.considerLoad 
|
+| 

hadoop git commit: HDFS-11058. Implement 'hadoop fs -df' command for ViewFileSystem. Contributed by Manoj Govindassamy.

2016-11-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1f12867a6 -> dd98a8005


HDFS-11058. Implement 'hadoop fs -df' command for ViewFileSystem. Contributed 
by Manoj Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd98a800
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd98a800
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd98a800

Branch: refs/heads/trunk
Commit: dd98a8005ad8939ffb6faba1ff0170387e91a8de
Parents: 1f12867
Author: Andrew Wang 
Authored: Wed Nov 23 16:40:39 2016 -0800
Committer: Andrew Wang 
Committed: Wed Nov 23 16:40:39 2016 -0800

--
 .../org/apache/hadoop/fs/shell/FsUsage.java | 124 ++
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  56 ---
 .../hadoop/fs/viewfs/ViewFileSystemUtil.java| 164 +++
 .../hadoop/fs/viewfs/TestViewfsFileStatus.java  |   9 +-
 .../fs/viewfs/ViewFileSystemBaseTest.java   |  74 -
 .../fs/viewfs/TestViewFileSystemHdfs.java   |  24 +++
 6 files changed, 395 insertions(+), 56 deletions(-)
--
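
The df columns map directly onto FsStatus, which is the API the command reads; a minimal, runnable sketch of the same calls (the path and the printing are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsStatus;
    import org.apache.hadoop.fs.Path;

    public class DfSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FsStatus st = fs.getStatus(new Path("/"));
        System.out.printf("size=%d used=%d available=%d%n",
            st.getCapacity(), st.getUsed(), st.getRemaining());
      }
    }

For ViewFileSystem the new ViewFileSystemUtil resolves this per mount point, which is why the table gains a "Mounted on" column in the diff below.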


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd98a800/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
index 197920f..6b1d7e0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
@@ -20,19 +20,24 @@ package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
+import org.apache.hadoop.fs.viewfs.ViewFileSystemUtil;
 import org.apache.hadoop.util.StringUtils;
 
-/** Base class for commands related to viewing filesystem usage, such as
- * du and df
+/**
+ * Base class for commands related to viewing filesystem usage,
+ * such as du and df.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -44,15 +49,27 @@ class FsUsage extends FsCommand {
 factory.addClass(Dus.class, "-dus");
   }
 
-  protected boolean humanReadable = false;
-  protected TableBuilder usagesTable;
-  
+  private boolean humanReadable = false;
+  private TableBuilder usagesTable;
+
   protected String formatSize(long size) {
 return humanReadable
 ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
 : String.valueOf(size);
   }
 
+  public TableBuilder getUsagesTable() {
+return usagesTable;
+  }
+
+  public void setUsagesTable(TableBuilder usagesTable) {
+this.usagesTable = usagesTable;
+  }
+
+  public void setHumanReadable(boolean humanReadable) {
+this.humanReadable = humanReadable;
+  }
+
   /** Show the size of a partition in the filesystem */
   public static class Df extends FsUsage {
 public static final String NAME = "df";
@@ -70,38 +87,74 @@ class FsUsage extends FsCommand {
 throws IOException {
   CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h");
   cf.parse(args);
-  humanReadable = cf.getOpt("h");
+  setHumanReadable(cf.getOpt("h"));
   if (args.isEmpty()) args.add(Path.SEPARATOR);
 }
 
 @Override
 protected void processArguments(LinkedList args)
 throws IOException {
-  usagesTable = new TableBuilder(
-  "Filesystem", "Size", "Used", "Available", "Use%");
-  usagesTable.setRightAlign(1, 2, 3, 4);
-  
+  setUsagesTable(new TableBuilder(
+  "Filesystem", "Size", "Used", "Available", "Use%", "Mounted on"));
+  getUsagesTable().setRightAlign(1, 2, 3, 4);
+
   super.processArguments(args);
-  if (!usagesTable.isEmpty()) {
-usagesTable.printToStream(out);
+  if (!getUsagesTable().isEmpty()) {
+getUsagesTable().printToStream(out);
   }
 }
 
-@Override
-protected void processPath(PathData item) throws IOException {
-  FsStatus fsStats = item.fs.getStatus(item.path);
-  long size = fsStats.getCapacity();
-  long used = fsStats.getUsed();
-  long free = fsStats.getRemaining();
-
-  usagesTable.addRow(
-  item.fs.getUri(),
+/**
+ * Add a 
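
For context, the Df changes above add a "Mounted on" column and per-mount-point rows when the target is a viewfs:// mount table. A small sketch of driving the same code path programmatically; it assumes fs.defaultFS in the loaded configuration points at a ViewFileSystem mount table and is equivalent to running "hadoop fs -df -h /" from the shell:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class DfOnViewFs {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Runs the same code path as "hadoop fs -df -h /". With fs.defaultFS set
        // to a viewfs:// mount table, FsUsage.Df prints one row per mount point.
        int rc = ToolRunner.run(conf, new FsShell(conf), new String[] {"-df", "-h", "/"});
        System.exit(rc);
      }
    }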

hadoop git commit: YARN-5649. Add REST endpoints for updating application timeouts. Contributed by Rohith Sharma K S

2016-11-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 accd9136e -> 437965804


YARN-5649. Add REST endpoints for updating application timeouts. Contributed by 
Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43796580
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43796580
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43796580

Branch: refs/heads/branch-2
Commit: 43796580421346f254d066c0c58ae13937cdbd45
Parents: accd913
Author: Jian He 
Authored: Wed Nov 23 16:25:29 2016 -0800
Committer: Jian He 
Committed: Wed Nov 23 16:27:12 2016 -0800

--
 .../server/resourcemanager/ClientRMService.java |   2 +-
 .../server/resourcemanager/RMAuditLogger.java   |   1 +
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../webapp/JAXBContextResolver.java |   3 +-
 .../resourcemanager/webapp/RMWebServices.java   | 186 +++
 .../resourcemanager/webapp/dao/AppInfo.java |  24 +++
 .../webapp/dao/AppTimeoutInfo.java  |  71 +++
 .../webapp/dao/AppTimeoutsInfo.java |  47 +
 .../server/resourcemanager/rmapp/MockRMApp.java |   3 +-
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 .../TestRMWebServicesAppsModification.java  | 137 +-
 11 files changed, 472 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43796580/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 21a9d13..8a0f373 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1734,7 +1734,7 @@ public class ClientRMService extends AbstractService 
implements
   RMAuditLogger.logFailure(callerUGI.getShortUserName(),
   AuditConstants.UPDATE_APP_TIMEOUTS, "UNKNOWN", "ClientRMService",
   ex.getMessage());
-  throw RPCUtil.getRemoteException(ex);
+  throw ex;
 }
 
 RMAuditLogger.logSuccess(callerUGI.getShortUserName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43796580/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
index d52e002..051d979 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
@@ -66,6 +66,7 @@ public class RMAuditLogger {
 "Update Application Priority";
 public static final String UPDATE_APP_TIMEOUTS =
 "Update Application Timeouts";
+public static final String GET_APP_TIMEOUTS = "Get Application Timeouts";
 public static final String CHANGE_CONTAINER_RESOURCE =
 "AM Changed Container Resource";
 public static final String SIGNAL_CONTAINER = "Signal Container Request";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43796580/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 

hadoop git commit: YARN-5649. Add REST endpoints for updating application timeouts. Contributed by Rohith Sharma K S

2016-11-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3d94da1e0 -> 1f12867a6


YARN-5649. Add REST endpoints for updating application timeouts. Contributed by 
Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f12867a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f12867a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f12867a

Branch: refs/heads/trunk
Commit: 1f12867a69544a1642aa986d4f9a8249be495434
Parents: 3d94da1
Author: Jian He 
Authored: Wed Nov 23 16:25:29 2016 -0800
Committer: Jian He 
Committed: Wed Nov 23 16:25:39 2016 -0800

--
 .../server/resourcemanager/ClientRMService.java |   2 +-
 .../server/resourcemanager/RMAuditLogger.java   |   1 +
 .../server/resourcemanager/RMServerUtils.java   |   2 +-
 .../webapp/JAXBContextResolver.java |   3 +-
 .../resourcemanager/webapp/RMWebServices.java   | 186 +++
 .../resourcemanager/webapp/dao/AppInfo.java |  24 +++
 .../webapp/dao/AppTimeoutInfo.java  |  71 +++
 .../webapp/dao/AppTimeoutsInfo.java |  47 +
 .../server/resourcemanager/rmapp/MockRMApp.java |   3 +-
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 .../TestRMWebServicesAppsModification.java  | 137 +-
 11 files changed, 472 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f12867a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 4e36b6c..0db775f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1752,7 +1752,7 @@ public class ClientRMService extends AbstractService 
implements
   RMAuditLogger.logFailure(callerUGI.getShortUserName(),
   AuditConstants.UPDATE_APP_TIMEOUTS, "UNKNOWN", "ClientRMService",
   ex.getMessage());
-  throw RPCUtil.getRemoteException(ex);
+  throw ex;
 }
 
 RMAuditLogger.logSuccess(callerUGI.getShortUserName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f12867a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
index d52e002..051d979 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java
@@ -66,6 +66,7 @@ public class RMAuditLogger {
 "Update Application Priority";
 public static final String UPDATE_APP_TIMEOUTS =
 "Update Application Timeouts";
+public static final String GET_APP_TIMEOUTS = "Get Application Timeouts";
 public static final String CHANGE_CONTAINER_RESOURCE =
 "AM Changed Container Resource";
 public static final String SIGNAL_CONTAINER = "Signal Container Request";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f12867a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
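
As a rough illustration of what the new endpoints enable, the sketch below updates an application's LIFETIME timeout over HTTP. The ResourceManager address, application id, endpoint path, and JSON body shape are assumptions for illustration only (the excerpt above shows the dao classes and RMWebServices changes, not the URL contract), so check the ResourceManager REST documentation for the exact form:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class UpdateAppTimeout {
      public static void main(String[] args) throws Exception {
        // Hypothetical RM address, application id, path, and payload.
        URL url = new URL(
            "http://rm-host:8088/ws/v1/cluster/apps/application_1479946800000_0001/timeout");
        String body = "{\"timeout\":{\"type\":\"LIFETIME\","
            + "\"expiryTime\":\"2016-12-05T22:51:00.104+0530\"}}";
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
          out.write(body.getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
        conn.disconnect();
      }
    }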
 

hadoop git commit: HADOOP-11552. Allow handoff on the server side for RPC requests. Contributed by Siddharth Seth

2016-11-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f3fff5c2e -> accd9136e


HADOOP-11552. Allow handoff on the server side for RPC requests. Contributed by 
Siddharth Seth

(cherry picked from commit 3d94da1e00fc6238fad458e415219f87920f1fc3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/accd9136
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/accd9136
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/accd9136

Branch: refs/heads/branch-2
Commit: accd9136e410efc51728db8706e1ea1fdfb183f3
Parents: f3fff5c
Author: Jian He 
Authored: Wed Nov 23 16:01:55 2016 -0800
Committer: Jian He 
Committed: Wed Nov 23 16:17:36 2016 -0800

--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  75 ++-
 .../hadoop/ipc/ProtobufRpcEngineCallback.java   |  29 +++
 .../main/java/org/apache/hadoop/ipc/Server.java | 204 ++---
 .../apache/hadoop/ipc/WritableRpcEngine.java|   3 +-
 .../hadoop/ipc/metrics/RpcDetailedMetrics.java  |   6 +
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |  33 +++
 .../ipc/TestProtoBufRpcServerHandoff.java   | 167 ++
 .../apache/hadoop/ipc/TestRpcServerHandoff.java | 218 +++
 .../hadoop-common/src/test/proto/test.proto |  11 +-
 .../src/test/proto/test_rpc_service.proto   |   4 +
 10 files changed, 708 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/accd9136/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 0a25cf6..3c0aaba 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
-import org.apache.hadoop.ipc.RpcWritable;
 import 
org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -344,6 +343,60 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
   
   public static class Server extends RPC.Server {
+
+static final ThreadLocal currentCallback =
+new ThreadLocal<>();
+
+static final ThreadLocal currentCallInfo = new ThreadLocal<>();
+
+static class CallInfo {
+  private final RPC.Server server;
+  private final String methodName;
+
+  public CallInfo(RPC.Server server, String methodName) {
+this.server = server;
+this.methodName = methodName;
+  }
+}
+
+static class ProtobufRpcEngineCallbackImpl
+implements ProtobufRpcEngineCallback {
+
+  private final RPC.Server server;
+  private final Call call;
+  private final String methodName;
+  private final long setupTime;
+
+  public ProtobufRpcEngineCallbackImpl() {
+this.server = currentCallInfo.get().server;
+this.call = Server.getCurCall().get();
+this.methodName = currentCallInfo.get().methodName;
+this.setupTime = Time.now();
+  }
+
+  @Override
+  public void setResponse(Message message) {
+long processingTime = Time.now() - setupTime;
+call.setDeferredResponse(RpcWritable.wrap(message));
+server.updateDeferredMetrics(methodName, processingTime);
+  }
+
+  @Override
+  public void error(Throwable t) {
+long processingTime = Time.now() - setupTime;
+String detailedMetricsName = t.getClass().getSimpleName();
+server.updateDeferredMetrics(detailedMetricsName, processingTime);
+call.setDeferredError(t);
+  }
+}
+
+@InterfaceStability.Unstable
+public static ProtobufRpcEngineCallback registerForDeferredResponse() {
+  ProtobufRpcEngineCallback callback = new ProtobufRpcEngineCallbackImpl();
+  currentCallback.set(callback);
+  return callback;
+}
+
 /**
  * Construct an RPC server.
  * 
@@ -442,9 +495,19 @@ public class ProtobufRpcEngine implements RpcEngine {
 long startTime = Time.now();
 int qTime = (int) (startTime - receiveTime);
 Exception exception = null;
+boolean isDeferred = false;
 try {
   server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
+  

hadoop git commit: HADOOP-11552. Allow handoff on the server side for RPC requests. Contributed by Siddharth Seth

2016-11-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0de0c32dd -> 3d94da1e0


HADOOP-11552. Allow handoff on the server side for RPC requests. Contributed by 
Siddharth Seth


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d94da1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d94da1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d94da1e

Branch: refs/heads/trunk
Commit: 3d94da1e00fc6238fad458e415219f87920f1fc3
Parents: 0de0c32
Author: Jian He 
Authored: Wed Nov 23 16:01:55 2016 -0800
Committer: Jian He 
Committed: Wed Nov 23 16:01:55 2016 -0800

--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  75 ++-
 .../hadoop/ipc/ProtobufRpcEngineCallback.java   |  29 +++
 .../main/java/org/apache/hadoop/ipc/Server.java | 197 +
 .../apache/hadoop/ipc/WritableRpcEngine.java|   3 +-
 .../hadoop/ipc/metrics/RpcDetailedMetrics.java  |   6 +
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |  33 +++
 .../ipc/TestProtoBufRpcServerHandoff.java   | 167 ++
 .../apache/hadoop/ipc/TestRpcServerHandoff.java | 218 +++
 .../hadoop-common/src/test/proto/test.proto |  11 +-
 .../src/test/proto/test_rpc_service.proto   |   6 +-
 10 files changed, 696 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d94da1e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index e68bfd4..b3f5458 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
-import org.apache.hadoop.ipc.RpcWritable;
 import 
org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
@@ -345,6 +344,60 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
   
   public static class Server extends RPC.Server {
+
+static final ThreadLocal currentCallback =
+new ThreadLocal<>();
+
+static final ThreadLocal currentCallInfo = new ThreadLocal<>();
+
+static class CallInfo {
+  private final RPC.Server server;
+  private final String methodName;
+
+  public CallInfo(RPC.Server server, String methodName) {
+this.server = server;
+this.methodName = methodName;
+  }
+}
+
+static class ProtobufRpcEngineCallbackImpl
+implements ProtobufRpcEngineCallback {
+
+  private final RPC.Server server;
+  private final Call call;
+  private final String methodName;
+  private final long setupTime;
+
+  public ProtobufRpcEngineCallbackImpl() {
+this.server = currentCallInfo.get().server;
+this.call = Server.getCurCall().get();
+this.methodName = currentCallInfo.get().methodName;
+this.setupTime = Time.now();
+  }
+
+  @Override
+  public void setResponse(Message message) {
+long processingTime = Time.now() - setupTime;
+call.setDeferredResponse(RpcWritable.wrap(message));
+server.updateDeferredMetrics(methodName, processingTime);
+  }
+
+  @Override
+  public void error(Throwable t) {
+long processingTime = Time.now() - setupTime;
+String detailedMetricsName = t.getClass().getSimpleName();
+server.updateDeferredMetrics(detailedMetricsName, processingTime);
+call.setDeferredError(t);
+  }
+}
+
+@InterfaceStability.Unstable
+public static ProtobufRpcEngineCallback registerForDeferredResponse() {
+  ProtobufRpcEngineCallback callback = new ProtobufRpcEngineCallbackImpl();
+  currentCallback.set(callback);
+  return callback;
+}
+
 /**
  * Construct an RPC server.
  * 
@@ -462,9 +515,19 @@ public class ProtobufRpcEngine implements RpcEngine {
 long startTime = Time.now();
 int qTime = (int) (startTime - receiveTime);
 Exception exception = null;
+boolean isDeferred = false;
 try {
   server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
+  currentCallInfo.set(new CallInfo(server, methodName));
   result = 
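
To make the handoff concrete, here is a sketch of how a protobuf service implementation could release the RPC handler thread and answer later through the callback added above. Only registerForDeferredResponse(), setResponse() and error() come from this patch; the executor, the helper class, and the convention of returning null from the deferred method are illustrative assumptions:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import com.google.protobuf.Message;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.ProtobufRpcEngineCallback;

    /**
     * Sketch of the deferred-response pattern: the handler registers a
     * callback, hands the work to another thread, and returns immediately.
     */
    public class DeferredResponder {
      private final ExecutorService executor = Executors.newSingleThreadExecutor();

      /** Call from inside a protobuf service method running on an RPC handler. */
      public Message respondLater(final Message precomputedResponse) {
        final ProtobufRpcEngineCallback callback =
            ProtobufRpcEngine.Server.registerForDeferredResponse();
        executor.submit(new Runnable() {
          @Override
          public void run() {
            try {
              // ... do the slow work here, then publish the result ...
              callback.setResponse(precomputedResponse);  // completes the deferred call
            } catch (Throwable t) {
              callback.error(t);                          // fails the deferred call
            }
          }
        });
        // Returning null assumes the engine ignores the direct return value once
        // a callback is registered; that convention is not shown in the excerpt.
        return null;
      }
    }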

[66/66] [abbrv] hadoop git commit: YARN-5813. Slider should not try to set a negative lifetime timeout value. Contributed by Jian He

2016-11-23 Thread jianhe
YARN-5813. Slider should not try to set a negative lifetime timeout value. 
Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65292bd1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65292bd1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65292bd1

Branch: refs/heads/yarn-native-services
Commit: 65292bd1f867bf4c7f4bcf184078ab796a7af8d7
Parents: 5131803
Author: Gour Saha 
Authored: Tue Nov 1 17:39:54 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../src/main/java/org/apache/slider/client/SliderClient.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65292bd1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index d1f88c5..ea10ed0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -2120,7 +2120,9 @@ public class SliderClient extends 
AbstractSliderLaunchedService implements RunSe
 amLauncher.setKeepContainersOverRestarts(true);
 // set lifetime in submission context;
 Map appTimeout = new HashMap<>();
-appTimeout.put(ApplicationTimeoutType.LIFETIME, lifetime);
+if (lifetime >= 0) {
+  appTimeout.put(ApplicationTimeoutType.LIFETIME, lifetime);
+}
 amLauncher.submissionContext.setApplicationTimeouts(appTimeout);
 int maxAppAttempts = config.getInt(KEY_AM_RESTART_LIMIT, 0);
 amLauncher.setMaxAppAttempts(maxAppAttempts);




[60/66] [abbrv] hadoop git commit: YARN-5729. Bug fixes for the service Rest API. Contributed by Gour Saha

2016-11-23 Thread jianhe
YARN-5729. Bug fixes for the service Rest API. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/913b242b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/913b242b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/913b242b

Branch: refs/heads/yarn-native-services
Commit: 913b242ba949e3a43591e9d6c652502c37d23431
Parents: f273d93
Author: Jian He 
Authored: Fri Oct 14 13:47:38 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../services/api/impl/ApplicationApiService.java| 16 ++--
 .../hadoop/yarn/services/resource/Application.java  |  6 +++---
 .../hadoop/yarn/services/resource/Artifact.java |  4 +++-
 .../hadoop/yarn/services/resource/Component.java|  4 +++-
 .../hadoop/yarn/services/resource/ConfigFile.java   |  4 +++-
 .../yarn/services/resource/Configuration.java   |  4 +++-
 .../hadoop/yarn/services/resource/Container.java|  6 +++---
 .../yarn/services/resource/PlacementPolicy.java |  4 +++-
 .../yarn/services/resource/ReadinessCheck.java  |  4 +++-
 .../hadoop/yarn/services/resource/Resource.java |  2 +-
 10 files changed, 39 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/913b242b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index 0a62629..21cf113 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -276,7 +276,7 @@ public class ApplicationApiService implements 
ApplicationApi {
 
 // If it is a simple app with no components, then create a default 
component
 if (application.getComponents() == null) {
-  application.setComponents(getDefaultComponentAsList());
+  application.setComponents(getDefaultComponentAsList(application));
 }
 
 // Application lifetime if not specified, is set to unlimited lifetime
@@ -1029,7 +1029,8 @@ public class ApplicationApiService implements 
ApplicationApi {
 // end-users point of view, is out of scope of the REST API. Also, this
 // readiness has nothing to do with readiness-check defined at the 
component
 // level (which is used for dependency resolution of component DAG).
-if (totalNumberOfIpAssignedContainers == 
totalExpectedNumberOfRunningContainers) {
+if (totalNumberOfIpAssignedContainers
+.longValue() == totalExpectedNumberOfRunningContainers.longValue()) {
   app.setState(ApplicationState.READY);
 }
 logger.info("Application = {}", app);
@@ -1389,6 +1390,17 @@ public class ApplicationApiService implements 
ApplicationApi {
 return Response.status(Status.NO_CONTENT).build();
   }
 
+  // create default component and initialize with app level global values
+  private List getDefaultComponentAsList(Application app) {
+List comps = getDefaultComponentAsList();
+Component comp = comps.get(0);
+comp.setArtifact(app.getArtifact());
+comp.setResource(app.getResource());
+comp.setNumberOfContainers(app.getNumberOfContainers());
+comp.setLaunchCommand(app.getLaunchCommand());
+return comps;
+  }
+
   private List getDefaultComponentAsList() {
 Component comp = new Component();
 comp.setName(DEFAULT_COMPONENT_NAME);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/913b242b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
index 719bf95..ed65ad2 100644
--- 
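
One of the fixes above replaces "==" on the boxed Long container counts with a longValue() comparison. A short reminder of why that matters; this is plain Java behaviour, independent of the patch:

    public class BoxedLongCompare {
      public static void main(String[] args) {
        Long expected = 200L;   // outside the Long cache range of -128..127
        Long assigned = 200L;
        System.out.println(expected == assigned);                          // false: compares object identity
        System.out.println(expected.longValue() == assigned.longValue());  // true: compares the numeric values
      }
    }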

[63/66] [abbrv] hadoop git commit: YARN-5610. Initial code for native services REST API. Contributed by Gour Saha

2016-11-23 Thread jianhe
YARN-5610. Initial code for native services REST API. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7ba4349
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7ba4349
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7ba4349

Branch: refs/heads/yarn-native-services
Commit: e7ba43490f87ee462716c21ae981d1cde7d5fc64
Parents: 97f73ad
Author: Jian He 
Authored: Tue Oct 11 11:36:57 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 hadoop-project/pom.xml  |   20 +
 .../dev-support/findbugs-exclude.xml|   20 +
 .../hadoop-yarn-services-api/pom.xml|  225 +++
 .../yarn/services/api/ApplicationApi.java   |   38 +
 .../api/impl/ApplicationApiService.java | 1527 ++
 .../yarn/services/resource/Application.java |  452 ++
 .../services/resource/ApplicationState.java |   25 +
 .../services/resource/ApplicationStatus.java|  147 ++
 .../hadoop/yarn/services/resource/Artifact.java |  155 ++
 .../yarn/services/resource/BaseResource.java|   48 +
 .../yarn/services/resource/Component.java   |  377 +
 .../yarn/services/resource/ConfigFile.java  |  190 +++
 .../yarn/services/resource/Configuration.java   |  147 ++
 .../yarn/services/resource/Container.java   |  256 +++
 .../yarn/services/resource/ContainerState.java  |   25 +
 .../hadoop/yarn/services/resource/Error.java|  125 ++
 .../yarn/services/resource/PlacementPolicy.java |   97 ++
 .../yarn/services/resource/ReadinessCheck.java  |  161 ++
 .../hadoop/yarn/services/resource/Resource.java |  149 ++
 .../yarn/services/utils/RestApiConstants.java   |   66 +
 .../services/utils/RestApiErrorMessages.java|   79 +
 .../services/webapp/ApplicationApiWebApp.java   |  127 ++
 .../src/main/resources/log4j-server.properties  |   76 +
 .../resources/webapps/services-rest-api/app |   16 +
 .../src/main/scripts/run_rest_service.sh|   28 +
 .../src/main/webapp/WEB-INF/web.xml |   36 +
 .../api/impl/TestApplicationApiService.java |  232 +++
 .../hadoop-yarn-applications/pom.xml|2 +-
 28 files changed, 4845 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ba4349/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 112fa18..6d63113 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -127,6 +127,9 @@
 1.0-alpha-8
 900
 1.11.45
+
+1.5.4
+
1.4
   
 
   
@@ -1236,6 +1239,23 @@
   kerb-simplekdc
   1.0.0-RC2
 
+
+  
+io.swagger
+swagger-annotations
+${swagger-annotations-version}
+  
+  
+com.fasterxml.jackson.jaxrs
+jackson-jaxrs-json-provider
+${jackson2.version}
+  
+  
+org.apache.maven.doxia
+doxia-module-markdown
+${maven-doxia-module-markdown.version}
+  
+
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ba4349/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
new file mode 100644
index 000..b89146a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/dev-support/findbugs-exclude.xml
@@ -0,0 +1,20 @@
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ba4349/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
new file mode 100644
index 000..78b7855
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
@@ -0,0 +1,225 @@
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance; 
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/maven-v4_0_0.xsd;>
+  4.0.0
+  
+org.apache.hadoop
+hadoop-yarn-applications
+3.0.0-alpha2-SNAPSHOT
+  
+  org.apache.hadoop
+  hadoop-yarn-services-api
+  Apache Hadoop YARN Services API
+  

[54/66] [abbrv] hadoop git commit: YARN-5808. Add gc log options to the yarn daemon script when starting services-api. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
YARN-5808. Add gc log options to the yarn daemon script when starting 
services-api. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b02252e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b02252e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b02252e2

Branch: refs/heads/yarn-native-services
Commit: b02252e21476880b104bfaa35ce2766bd228ba6c
Parents: dfbb075
Author: Gour Saha 
Authored: Thu Nov 10 11:35:02 2016 -0800
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 hadoop-yarn-project/hadoop-yarn/bin/yarn | 16 
 hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh | 12 
 2 files changed, 20 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b02252e2/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 26d54b8..2396a7a 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -47,7 +47,7 @@ function hadoop_usage
   hadoop_add_subcommand "resourcemanager" "run the ResourceManager"
   hadoop_add_subcommand "rmadmin" "admin tools"
   hadoop_add_subcommand "scmadmin" "SharedCacheManager admin tools"
-  hadoop_add_subcommand "services-api" "run slider services api"
+  hadoop_add_subcommand "servicesapi" "run slider services api"
   hadoop_add_subcommand "sharedcachemanager" "run the SharedCacheManager 
daemon"
   hadoop_add_subcommand "slider" "run a slider app"
   hadoop_add_subcommand "timelinereader" "run the timeline reader server"
@@ -144,20 +144,20 @@ function yarncmd_case
 scmadmin)
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.SCMAdmin'
 ;;
-services-api)
+servicesapi)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
   hadoop_add_classpath 
"${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/slider"'/*'
   hadoop_add_classpath 
"${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/services-api"'/*'
   
HADOOP_CLASSNAME='org.apache.hadoop.yarn.services.webapp.ApplicationApiWebApp'
-  hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS} \
--Dslider.libdir=${HADOOP_YARN_HOME}/${YARN_DIR},\
+  local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\
 ${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\
 ${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/slider,\
 ${HADOOP_HDFS_HOME}/${HDFS_DIR},\
 ${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\
 ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\
 ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
+  hadoop_translate_cygwin_path sld
+  hadoop_add_param HADOOP_OPTS slider.libdir "-Dslider.libdir=${sld}"
 ;;
 sharedcachemanager)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -166,15 +166,15 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
 slider)
   hadoop_add_classpath 
"${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/slider"'/*'
   HADOOP_CLASSNAME='org.apache.slider.Slider'
-  hadoop_debug "Append YARN_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${YARN_CLIENT_OPTS} \
--Dslider.libdir=${HADOOP_YARN_HOME}/${YARN_DIR},\
+  local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\
 ${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\
 ${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}/slider,\
 ${HADOOP_HDFS_HOME}/${HDFS_DIR},\
 ${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\
 ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\
 ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
+  hadoop_translate_cygwin_path sld
+  hadoop_add_param HADOOP_OPTS slider.libdir "-Dslider.libdir=${sld}"
 ;;
 timelinereader)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b02252e2/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
--
diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh 
b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
index d003adb..3828897 100644
--- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
+++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-env.sh
@@ -136,3 +136,15 @@
 # See ResourceManager for some examples
 #
 #export YARN_SHAREDCACHEMANAGER_OPTS=
+
+###
+# Services API specific parameters
+###
+# Specify the JVM options to be used when starting the services API.
+#
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# See ResourceManager for some examples
+#
+#export YARN_SERVICESAPI_OPTS="-verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 

[46/66] [abbrv] hadoop git commit: YARN-5505. Create an agent-less docker provider in the native-services framework. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fa8152d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java
index 8203cf0..fdc5be1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentClientProvider.java
@@ -82,6 +82,8 @@ import java.util.Set;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
+import static 
org.apache.slider.common.tools.SliderUtils.getApplicationDefinitionPath;
+
 /** This class implements  the client-side aspects of the agent deployer */
 public class AgentClientProvider extends AbstractClientProvider
 implements AgentKeys, SliderKeys {
@@ -132,13 +134,13 @@ public class AgentClientProvider extends 
AbstractClientProvider
 sliderFileSystem.verifyFileExists(appDefPath);
 
 String agentConf = instanceDefinition.getAppConfOperations().
-getGlobalOptions().getOption(AgentKeys.AGENT_CONF, "");
+getGlobalOptions().getOption(AGENT_CONF, "");
 if (StringUtils.isNotEmpty(agentConf)) {
   sliderFileSystem.verifyFileExists(new Path(agentConf));
 }
 
 String appHome = instanceDefinition.getAppConfOperations().
-getGlobalOptions().get(AgentKeys.PACKAGE_PATH);
+getGlobalOptions().get(PACKAGE_PATH);
 if (SliderUtils.isUnset(appHome)) {
   String agentImage = instanceDefinition.getInternalOperations().
   get(InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH);
@@ -173,7 +175,7 @@ public class AgentClientProvider extends 
AbstractClientProvider
 }
 
 Set names = resources.getComponentNames();
-names.remove(SliderKeys.COMPONENT_AM);
+names.remove(COMPONENT_AM);
 Map priorityMap = new HashMap();
 
 for (String name : names) {
@@ -271,7 +273,7 @@ public class AgentClientProvider extends 
AbstractClientProvider
 String agentImage = instanceDefinition.getInternalOperations().
 get(InternalKeys.INTERNAL_APPLICATION_IMAGE_PATH);
 if (SliderUtils.isUnset(agentImage)) {
-  Path agentPath = new Path(tempPath.getParent(), 
AgentKeys.PROVIDER_AGENT);
+  Path agentPath = new Path(tempPath.getParent(), PROVIDER_AGENT);
   log.info("Automatically uploading the agent tarball at {}", agentPath);
   fileSystem.getFileSystem().mkdirs(agentPath);
   if (ProviderUtils.addAgentTar(this, AGENT_TAR, fileSystem, agentPath)) {
@@ -284,6 +286,12 @@ public class AgentClientProvider extends 
AbstractClientProvider
 
   @Override
   public Set getApplicationTags(SliderFileSystem fileSystem,
+  ConfTreeOperations appConf) throws SliderException {
+return getApplicationTags(fileSystem,
+getApplicationDefinitionPath(appConf));
+  }
+
+  public Set getApplicationTags(SliderFileSystem fileSystem,
 String appDef) throws SliderException {
 Set tags;
 Metainfo metaInfo = getMetainfo(fileSystem, appDef);
@@ -437,19 +445,19 @@ public class AgentClientProvider extends 
AbstractClientProvider
 if (config != null) {
   try {
 clientRoot = config.getJSONObject("global")
-.getString(AgentKeys.APP_CLIENT_ROOT);
+.getString(APP_CLIENT_ROOT);
   } catch (JSONException e) {
 log.info("Couldn't read {} from provided client config, falling " +
-"back on default", AgentKeys.APP_CLIENT_ROOT);
+"back on default", APP_CLIENT_ROOT);
   }
 }
 if (clientRoot == null && defaultConfig != null) {
   try {
 clientRoot = defaultConfig.getJSONObject("global")
-.getString(AgentKeys.APP_CLIENT_ROOT);
+.getString(APP_CLIENT_ROOT);
   } catch (JSONException e) {
 log.info("Couldn't read {} from default client config, using {}",
-AgentKeys.APP_CLIENT_ROOT, clientInstallPath);
+APP_CLIENT_ROOT, clientInstallPath);
   }
 }
 if (clientRoot == null) {
@@ -500,7 +508,7 @@ public class AgentClientProvider extends 
AbstractClientProvider
 try {
   String clientScriptPath = appPkgDir.getAbsolutePath() + File.separator + 

[36/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/Messages.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/Messages.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/Messages.java
new file mode 100644
index 000..373d64d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/Messages.java
@@ -0,0 +1,34473 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: SliderClusterMessages.proto
+
+package org.apache.slider.api.proto;
+
+public final class Messages {
+  private Messages() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface RoleInstanceStateOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string name = 1;
+/**
+ * required string name = 1;
+ */
+boolean hasName();
+/**
+ * required string name = 1;
+ */
+java.lang.String getName();
+/**
+ * required string name = 1;
+ */
+com.google.protobuf.ByteString
+getNameBytes();
+
+// optional string role = 2;
+/**
+ * optional string role = 2;
+ */
+boolean hasRole();
+/**
+ * optional string role = 2;
+ */
+java.lang.String getRole();
+/**
+ * optional string role = 2;
+ */
+com.google.protobuf.ByteString
+getRoleBytes();
+
+// required uint32 state = 4;
+/**
+ * required uint32 state = 4;
+ */
+boolean hasState();
+/**
+ * required uint32 state = 4;
+ */
+int getState();
+
+// required uint32 exitCode = 5;
+/**
+ * required uint32 exitCode = 5;
+ */
+boolean hasExitCode();
+/**
+ * required uint32 exitCode = 5;
+ */
+int getExitCode();
+
+// optional string command = 6;
+/**
+ * optional string command = 6;
+ */
+boolean hasCommand();
+/**
+ * optional string command = 6;
+ */
+java.lang.String getCommand();
+/**
+ * optional string command = 6;
+ */
+com.google.protobuf.ByteString
+getCommandBytes();
+
+// optional string diagnostics = 7;
+/**
+ * optional string diagnostics = 7;
+ */
+boolean hasDiagnostics();
+/**
+ * optional string diagnostics = 7;
+ */
+java.lang.String getDiagnostics();
+/**
+ * optional string diagnostics = 7;
+ */
+com.google.protobuf.ByteString
+getDiagnosticsBytes();
+
+// repeated string output = 8;
+/**
+ * repeated string output = 8;
+ */
+java.util.List
+getOutputList();
+/**
+ * repeated string output = 8;
+ */
+int getOutputCount();
+/**
+ * repeated string output = 8;
+ */
+java.lang.String getOutput(int index);
+/**
+ * repeated string output = 8;
+ */
+com.google.protobuf.ByteString
+getOutputBytes(int index);
+
+// repeated string environment = 9;
+/**
+ * repeated string environment = 9;
+ */
+java.util.List
+getEnvironmentList();
+/**
+ * repeated string environment = 9;
+ */
+int getEnvironmentCount();
+/**
+ * repeated string environment = 9;
+ */
+java.lang.String getEnvironment(int index);
+/**
+ * repeated string environment = 9;
+ */
+com.google.protobuf.ByteString
+getEnvironmentBytes(int index);
+
+// required uint32 roleId = 10;
+/**
+ * required uint32 roleId = 10;
+ */
+boolean hasRoleId();
+/**
+ * required uint32 roleId = 10;
+ */
+int getRoleId();
+
+// required bool released = 11;
+/**
+ * required bool released = 11;
+ */
+boolean hasReleased();
+/**
+ * required bool released = 11;
+ */
+boolean getReleased();
+
+// required int64 createTime = 12;
+/**
+ * required int64 createTime = 12;
+ */
+boolean hasCreateTime();
+/**
+ * required int64 createTime = 12;
+ */
+long getCreateTime();
+
+// required int64 startTime = 13;
+/**
+ * required int64 startTime = 13;
+ */
+boolean hasStartTime();
+/**
+ * required int64 startTime = 13;
+ */
+long getStartTime();
+
+// required string host = 14;
+/**
+ * required string host = 14;
+ */
+boolean hasHost();
+/**
+ * required string host = 14;
+ */
+java.lang.String getHost();
+/**
+ * required string host = 14;
+ */
+

[62/66] [abbrv] hadoop git commit: YARN-5610. Initial code for native services REST API. Contributed by Gour Saha

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ba4349/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
new file mode 100644
index 000..cfcae95
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
@@ -0,0 +1,452 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.services.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.slider.providers.PlacementPolicy;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonPropertyOrder;
+
+/**
+ * An Application resource has the following attributes.
+ **/
+
+@ApiModel(description = "An Application resource has the following 
attributes.")
+@javax.annotation.Generated(value = "class 
io.swagger.codegen.languages.JavaClientCodegen", date = 
"2016-06-02T08:15:05.615-07:00")
+@XmlRootElement
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonPropertyOrder({ " name, state, resource, numberOfContainers, lifetime, 
containers " })
+public class Application extends BaseResource {
+  private static final long serialVersionUID = -4491694636566094885L;
+
+  private String id = null;
+  private String name = null;
+  private Artifact artifact = null;
+  private Resource resource = null;
+  private String launchCommand = null;
+  private Date launchTime = null;
+  private Long numberOfContainers = null;
+  private Long numberOfRunningContainers = null;
+  private Long lifetime = null;
+  private PlacementPolicy placementPolicy = null;
+  private List components = null;
+  private Configuration configuration = null;
+  private List containers = new ArrayList<>();
+  private ApplicationState state = null;
+  private Map quicklinks = null;
+  private String queue;
+
+  /**
+   * A unique application id.
+   **/
+  public Application id(String id) {
+this.id = id;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", required = true, value = "A unique 
application id.")
+  @JsonProperty("id")
+  public String getId() {
+return id;
+  }
+
+  public void setId(String id) {
+this.id = id;
+  }
+
+  /**
+   * A unique application name.
+   **/
+  public Application name(String name) {
+this.name = name;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", required = true, value = "A unique 
application name.")
+  @JsonProperty("name")
+  public String getName() {
+return name;
+  }
+
+  public void setName(String name) {
+this.name = name;
+  }
+
+  /**
+   * Artifact of single-component applications. Mandatory if components
+   * attribute is not specified.
+   **/
+  public Application artifact(Artifact artifact) {
+this.artifact = artifact;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Artifact of single-component 
applications. Mandatory if components attribute is not specified.")
+  @JsonProperty("artifact")
+  public Artifact getArtifact() {
+return artifact;
+  }
+
+  public void setArtifact(Artifact artifact) {
+this.artifact = artifact;
+  }
+
+  /**
+   * Resource of single-component applications or the global default for
+   * multi-component applications. Mandatory if it is a single-component
+   * application and if cpus and memory are not specified at the 
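
For a sense of how this resource class is meant to be used, here is a sketch that chains the fluent setters defined above. The concrete values are made up, Artifact's no-arg constructor is assumed (the class is a plain bean in the same package), and the example sticks to the methods visible in this excerpt:

    package org.apache.hadoop.yarn.services.resource;

    public class ApplicationExample {
      public static void main(String[] args) {
        // Illustrative values only.
        Application app = new Application()
            .id("application_1479946800000_0001")  // unique application id
            .name("sleeper-service")               // unique application name
            .artifact(new Artifact());             // single-component artifact
        System.out.println(app.getName());
      }
    }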

[33/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java
new file mode 100644
index 000..9879d05
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ApplicationLivenessInformation.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.types;
+
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+/**
+ * Serialized information about liveness
+ * 
+ *   If true liveness probes are implemented, this
+ *   datatype can be extended to publish their details.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class ApplicationLivenessInformation {
+  /** flag set if the cluster is at size */
+  public boolean allRequestsSatisfied;
+
+  /** number of outstanding requests: those needed to satisfy */
+  public int requestsOutstanding;
+
+  /** number of requests submitted to YARN */
+  public int activeRequests;
+
+  @Override
+  public String toString() {
+final StringBuilder sb =
+new StringBuilder("ApplicationLivenessInformation{");
+sb.append("allRequestsSatisfied=").append(allRequestsSatisfied);
+sb.append(", requestsOutstanding=").append(requestsOutstanding);
+sb.append('}');
+return sb.toString();
+  }
+}
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ComponentInformation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ComponentInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ComponentInformation.java
new file mode 100644
index 000..c46a59f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/types/ComponentInformation.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.types;
+
+import org.apache.slider.api.StatusKeys;
+import org.apache.slider.server.appmaster.state.RoleStatus;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Serializable version of component data.
+ * 
+ * This is sent in REST 
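
The ApplicationLivenessInformation class above is a plain Jackson bean annotated with the pre-2.x org.codehaus annotations, so it can be serialized with the matching ObjectMapper. A small sketch with made-up counts:

    import org.apache.slider.api.types.ApplicationLivenessInformation;
    import org.codehaus.jackson.map.ObjectMapper;

    public class LivenessJsonExample {
      public static void main(String[] args) throws Exception {
        ApplicationLivenessInformation info = new ApplicationLivenessInformation();
        info.allRequestsSatisfied = false;  // cluster not yet at requested size
        info.requestsOutstanding = 3;       // requests needed to reach size
        info.activeRequests = 3;            // requests currently submitted to YARN
        // Serialize with the codehaus (Jackson 1.x) mapper the annotations target.
        String json = new ObjectMapper().writeValueAsString(info);
        System.out.println(json);
      }
    }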

[55/66] [abbrv] hadoop git commit: YARN-5812. Exception during GET call - "Failed to retrieve application: null". Contributed by Gour Saha

2016-11-23 Thread jianhe
YARN-5812. Exception during GET call - "Failed to retrieve application: null". 
Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66b3c2ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66b3c2ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66b3c2ac

Branch: refs/heads/yarn-native-services
Commit: 66b3c2ac33d7edf027eaee47fa5fc059a4024802
Parents: 65292bd
Author: Jian He 
Authored: Wed Nov 2 15:55:48 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../api/impl/ApplicationApiService.java | 36 ++--
 1 file changed, 26 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66b3c2ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index 37bd134..6db69ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -50,7 +50,6 @@ import javax.ws.rs.core.Response.Status;
 
 import org.apache.commons.lang.SerializationUtils;
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -905,6 +904,10 @@ public class ApplicationApiService implements 
ApplicationApi {
 
 // state
 String appState = jsonGetAsString(appStatus, "state");
+if (appState == null) {
+  // consider that app is still in ACCEPTED state
+  appState = String.valueOf(StateValues.STATE_INCOMPLETE);
+}
 switch (Integer.parseInt(appState)) {
   case StateValues.STATE_LIVE:
 app.setState(ApplicationState.STARTED);
@@ -1069,6 +1072,9 @@ public class ApplicationApiService implements 
ApplicationApi {
 String status = null;
 try {
   status = sliderClient.actionStatus(appName);
+} catch (BadClusterStateException e) {
+  logger.warn("Application not running yet", e);
+  return EMPTY_JSON_OBJECT;
 } catch (Exception e) {
   logger.error("Exception calling slider.actionStatus", e);
   return EMPTY_JSON_OBJECT;
@@ -1097,7 +1103,7 @@ public class ApplicationApiService implements 
ApplicationApi {
 try {
   registry = sliderClient.actionRegistryGetConfig(registryArgs)
 .asJson();
-} catch (FileNotFoundException | PathNotFoundException e) {
+} catch (FileNotFoundException | NotFoundException e) {
   // ignore and return empty object
   return EMPTY_JSON_OBJECT;
 } catch (Exception e) {
@@ -1192,23 +1198,33 @@ public class ApplicationApiService implements 
ApplicationApi {
 // little longer for it to stop from YARN point of view. Slider destroy
 // fails if the application is not completely stopped. Hence the need to
 // call destroy in a controlled loop few times (only if exit code is
-// EXIT_APPLICATION_IN_USE), before giving up.
+// EXIT_APPLICATION_IN_USE or EXIT_INSTANCE_EXISTS), before giving up.
 boolean keepTrying = true;
-int maxDeleteAttempt = 5;
-int deleteAttempt = 0;
-while (keepTrying && deleteAttempt < maxDeleteAttempt) {
+int maxDeleteAttempts = 5;
+int deleteAttempts = 0;
+int sleepIntervalInMillis = 500;
+while (keepTrying && deleteAttempts < maxDeleteAttempts) {
   try {
 destroySliderApplication(appName);
 keepTrying = false;
   } catch (SliderException e) {
-logger.error("Delete application threw exception", e);
-if (e.getExitCode() == SliderExitCodes.EXIT_APPLICATION_IN_USE) {
-  deleteAttempt++;
+if (e.getExitCode() == SliderExitCodes.EXIT_APPLICATION_IN_USE
+|| e.getExitCode() == SliderExitCodes.EXIT_INSTANCE_EXISTS) {
+  deleteAttempts++;
+  // If we used up all the allowed 
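The hunk above is cut off by the archive mid-comment. For readers following the logic, here is a minimal, self-contained sketch of the bounded-retry pattern it describes; RetrySketch and destroyApp are placeholders standing in for destroySliderApplication and the SliderException exit-code checks, not part of the actual patch.

// Minimal sketch of the bounded-retry pattern described in the hunk above.
// RetrySketch and destroyApp are placeholders; the real patch retries
// destroySliderApplication(appName) and inspects SliderException exit codes.
public final class RetrySketch {

  static void deleteWithRetries(String appName) throws Exception {
    final int maxDeleteAttempts = 5;        // same bound as the patch
    final int sleepIntervalInMillis = 500;  // same pause between attempts
    int deleteAttempts = 0;
    boolean keepTrying = true;
    while (keepTrying && deleteAttempts < maxDeleteAttempts) {
      try {
        destroyApp(appName);
        keepTrying = false;                 // success, stop retrying
      } catch (RuntimeException e) {        // stands in for SliderException
        deleteAttempts++;
        if (deleteAttempts >= maxDeleteAttempts) {
          throw e;                          // used up all allowed attempts
        }
        Thread.sleep(sleepIntervalInMillis);
      }
    }
  }

  // Hypothetical stub for the real destroy call.
  static void destroyApp(String appName) {
  }
}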

[45/66] [abbrv] hadoop git commit: YARN-5505. Create an agent-less docker provider in the native-services framework. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fa8152d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
new file mode 100644
index 000..bebb5f0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -0,0 +1,355 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.docker;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.OptionKeys;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.launch.CommandLineBuilder;
+import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.core.registry.docstore.ConfigFormat;
+import org.apache.slider.core.registry.docstore.ConfigUtils;
+import org.apache.slider.core.registry.docstore.ExportEntry;
+import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.providers.ProviderCore;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.providers.ProviderUtils;
+import org.apache.slider.server.appmaster.state.RoleInstance;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Scanner;
+
+public class DockerProviderService extends AbstractProviderService implements
+ProviderCore,
+DockerKeys,
+SliderKeys {
+
+  protected static final Logger log =
+  LoggerFactory.getLogger(DockerProviderService.class);
+  private static final ProviderUtils providerUtils = new ProviderUtils(log);
+  private static final String EXPORT_GROUP = "quicklinks";
+  private static final String APPLICATION_TAG = "application";
+
+  private String clusterName = null;
+  private SliderFileSystem fileSystem = null;
+
+  protected DockerProviderService() {
+super("DockerProviderService");
+  }
+
+  @Override
+  public List<ProviderRole> getRoles() {
+return Collections.emptyList();
+  }
+
+  @Override
+  public boolean isSupportedRole(String role) {
+return true;
+  }
+
+  @Override
+  public void validateInstanceDefinition(AggregateConf instanceDefinition)
+  throws SliderException {
+  }
+
+  private String getClusterName() {
+if (SliderUtils.isUnset(clusterName)) {
+  clusterName = 
getAmState().getInternalsSnapshot().get(OptionKeys.APPLICATION_NAME);
+}
+return clusterName;
+  }
+
+  @Override
+  public void 

[44/66] [abbrv] hadoop git commit: YARN-5513. Move Java only tests from slider develop to yarn-native-services. Contributed by Gour Saha

2016-11-23 Thread jianhe
YARN-5513. Move Java only tests from slider develop to yarn-native-services. 
Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59a89750
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59a89750
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59a89750

Branch: refs/heads/yarn-native-services
Commit: 59a89750d988ea29814bc7b06d0c33ed89bef40f
Parents: e89fad2
Author: Jian He 
Authored: Wed Aug 17 00:42:24 2016 +0800
Committer: Jian He 
Committed: Wed Nov 23 15:25:49 2016 -0800

--
 .../dev-support/findbugs-exclude.xml|  20 +
 .../hadoop-yarn-slider-core/pom.xml |  22 +
 .../slider/common/tools/TestSliderUtils.java| 159 
 .../core/launch/TestAppMasterLauncher.java  | 157 
 .../TestAppMasterLauncherWithAmReset.java   |  92 ++
 .../TestPublishedConfigurationOutputter.java| 222 +
 .../agent/TestAgentClientProvider.java  |  77 ++
 .../agent/TestAgentLaunchParameter.java |  76 ++
 .../slider/providers/agent/TestAgentUtils.java  |  94 ++
 .../agent/TestAppDefinitionPersister.java   | 264 ++
 .../agent/TestComponentTagProvider.java | 115 +++
 .../slider/providers/agent/TestState.java   |  33 +
 .../application/metadata/TestConfigParser.java  | 107 +++
 .../metadata/TestMetainfoParser.java| 177 
 .../appmaster/TestServiceRecordAttributes.java  |  68 ++
 .../publisher/TestAgentProviderService.java |  60 ++
 .../publisher/TestSliderProviderFactory.java|  40 +
 .../server/servicemonitor/TestPortProbe.java|  37 +
 .../security/TestCertificateManager.java| 540 +++
 .../TestMultiThreadedStoreGeneration.java   | 156 
 .../server/services/workflow/MockService.java   |  80 ++
 .../workflow/ParentWorkflowTestBase.java|  70 ++
 .../workflow/ProcessCommandFactory.java |  96 ++
 .../services/workflow/SimpleRunnable.java   |  46 +
 .../workflow/TestWorkflowClosingService.java| 116 +++
 .../workflow/TestWorkflowCompositeService.java  | 113 +++
 .../workflow/TestWorkflowExecutorService.java   |  66 ++
 .../workflow/TestWorkflowRpcService.java| 107 +++
 .../workflow/TestWorkflowSequenceService.java   | 151 
 .../TestWorkflowServiceTerminatingRunnable.java |  64 ++
 .../workflow/WorkflowServiceTestBase.java   | 139 +++
 .../apache/slider/test/ContractTestUtils.java   | 901 +++
 .../slider/test/MiniZooKeeperCluster.java   | 395 
 .../org/apache/slider/tools/TestUtility.java| 181 
 .../slider/common/tools/test/metainfo.txt   |  16 +
 .../slider/common/tools/test/metainfo.xml   |  98 ++
 .../slider/common/tools/test/someOtherFile.txt  |  16 +
 .../slider/common/tools/test/someOtherFile.xml  |  17 +
 .../agent/application/metadata/metainfo.xml | 180 
 39 files changed, 5368 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a89750/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/dev-support/findbugs-exclude.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/dev-support/findbugs-exclude.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/dev-support/findbugs-exclude.xml
new file mode 100644
index 000..b89146a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/dev-support/findbugs-exclude.xml
@@ -0,0 +1,20 @@
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a89750/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
index 591a5ca..d778f44 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
@@ -129,6 +129,13 @@
 
 
   org.apache.hadoop
+  hadoop-common
+  test-jar
+  test
+
+
+
+  org.apache.hadoop
   hadoop-hdfs
 
 
@@ -268,6 +275,20 @@
 
 
 
+  org.easymock
+  easymock
+  3.1
+  test
+
+
+
+  org.powermock
+  powermock-api-easymock
+  1.5
+  test
+
+
+
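Note that the archive has stripped the XML tags from this pom hunk, leaving only the dependency coordinates (groupId, artifactId, version, scope). Since the hunk adds EasyMock 3.1 and PowerMock as test-scoped dependencies, a minimal EasyMock record/replay/verify cycle is sketched below for orientation; it is illustrative only, not one of the ported slider tests.

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import java.util.List;

// Illustrative only: a minimal EasyMock record/replay/verify cycle of the kind
// the new test-scoped dependencies enable.
public class EasyMockSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    List<String> mock = createMock(List.class);  // create the mock
    expect(mock.size()).andReturn(2);            // record an expectation
    replay(mock);                                // switch to replay mode
    if (mock.size() != 2) {                      // exercise the mock
      throw new AssertionError("unexpected size");
    }
    verify(mock);                                // confirm expectations were met
  }
}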

[01/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe) [Forced Update!]

2016-11-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services 82a0ec080 -> b02252e21 (forced update)


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
new file mode 100644
index 000..b8bdc59
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
@@ -0,0 +1,396 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.slider.api.proto";
+option java_outer_classname = "Messages";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package org.apache.slider.api;
+
+//import "Security.proto";
+
+/*
+  Look at SliderClusterProtocol.proto to see how to build this
+*/
+
+message RoleInstanceState {
+  required string name =1;
+  optional string role =2;
+  required uint32 state =   4;
+  required uint32 exitCode =5;
+  optional string command = 6;
+  optional string diagnostics = 7;
+  repeated string output =  8;
+  repeated string environment = 9;
+  required uint32 roleId = 10;
+  required bool released = 11;
+  required int64 createTime =  12;
+  required int64 startTime =   13;
+  required string host =   14;
+  required string hostURL =15;
+  optional string appVersion = 16;
+}
+
+/**
+ * stop the cluster
+ */
+message StopClusterRequestProto {
+  /**
+  message to include
+  */
+  required string message = 1;
+}
+
+/**
+ * stop the cluster
+ */
+message StopClusterResponseProto {
+}
+
+/**
+ * upgrade the containers
+ */
+message UpgradeContainersRequestProto {
+  /**
+  message to include
+  */
+  required string message = 1;
+  repeated string container =   2;
+  repeated string component =   3;
+}
+
+/**
+ * upgrade the containers
+ */
+message UpgradeContainersResponseProto {
+}
+
+/**
+ * flex the cluster
+ */
+message FlexClusterRequestProto {
+  required string clusterSpec = 1;
+}
+
+
+/**
+ * flex the cluster
+ */
+message FlexClusterResponseProto {
+  required bool response = 1;
+}
+
+
+/**
+ * void request
+ */
+message GetJSONClusterStatusRequestProto {
+}
+
+/**
+ * response
+ */
+message GetJSONClusterStatusResponseProto {
+  required string clusterSpec = 1;
+}
+
+/**
+ * list the nodes in a role
+ */
+message ListNodeUUIDsByRoleRequestProto {
+  required string role = 1;
+}
+
+/**
+ * list the nodes in a role
+ */
+message ListNodeUUIDsByRoleResponseProto {
+  repeated string uuid = 1 ;
+}
+
+/**
+ * get a node
+ */
+message GetNodeRequestProto {
+  required string uuid = 1;
+}
+
+
+/**
+ * response on a node
+ */
+message GetNodeResponseProto {
+   required RoleInstanceState clusterNode = 1 ;
+}
+
+/**
+ * list the nodes for the UUDs
+ */
+message GetClusterNodesRequestProto {
+  repeated string uuid = 1 ;
+}
+
+/**
+ * list the nodes in a role
+ */
+message GetClusterNodesResponseProto {
+  repeated RoleInstanceState clusterNode = 1 ;
+}
+
+/**
+ * Echo
+ */
+message EchoRequestProto {
+  required string text = 1;
+}
+
+/**
+ * Echo reply
+ */
+message EchoResponseProto {
+  required string text = 1;
+}
+
+
+/**
+ * Kill a container
+ */
+message KillContainerRequestProto {
+  required string id = 1;
+}
+
+/**
+ * Kill reply
+ */
+message KillContainerResponseProto {
+  required bool success = 1;
+}
+
+/**
+ * AM suicide
+ */
+message AMSuicideRequestProto {
+  required string text =  1;
+  required int32 signal = 2;
+  required int32 delay =  3;
+}
+
+/**
+ * AM suicide reply. For this to be returned implies
+ * a failure of the AM to kill itself
+ */
+message AMSuicideResponseProto {
+
+}
+
+
+/**
+ * Ask for the instance definition details
+ */
+message 
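The proto file above is truncated by the archive, but the options at its top (java_package org.apache.slider.api.proto, java_outer_classname "Messages") determine how the generated code is used. A hedged sketch of building and round-tripping one of the declared messages, assuming the .proto has been compiled with protoc:

import org.apache.slider.api.proto.Messages;

// Sketch of using the builders protoc generates from the definitions above.
public class ProtoUsageSketch {
  public static void main(String[] args) throws Exception {
    // Build the (required) message field of StopClusterRequestProto.
    Messages.StopClusterRequestProto stop =
        Messages.StopClusterRequestProto.newBuilder()
            .setMessage("shutting down for maintenance")
            .build();

    // Round-trip through the wire format, as the RPC layer would.
    byte[] wire = stop.toByteArray();
    Messages.StopClusterRequestProto parsed =
        Messages.StopClusterRequestProto.parseFrom(wire);
    System.out.println(parsed.getMessage());
  }
}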

[48/66] [abbrv] hadoop git commit: Addendum patch for YARN-5610. Contributed by Gour Saha

2016-11-23 Thread jianhe
Addendum patch for YARN-5610. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebbf8868
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebbf8868
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebbf8868

Branch: refs/heads/yarn-native-services
Commit: ebbf88688d38dfee5cd7f7124c99dc8c89caa0fd
Parents: 9a7558f
Author: Jian He 
Authored: Wed Oct 12 13:33:09 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../yarn/services/resource/Application.java | 44 ++--
 .../services/resource/ApplicationState.java |  5 +++
 .../services/resource/ApplicationStatus.java|  8 ++--
 .../hadoop/yarn/services/resource/Artifact.java |  4 +-
 .../yarn/services/resource/Component.java   | 16 +++
 .../yarn/services/resource/Container.java   | 15 ---
 .../yarn/services/resource/ReadinessCheck.java  |  6 +--
 7 files changed, 54 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebbf8868/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
index cfcae95..719bf95 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Application.java
@@ -48,8 +48,8 @@ import com.fasterxml.jackson.annotation.JsonPropertyOrder;
 public class Application extends BaseResource {
   private static final long serialVersionUID = -4491694636566094885L;
 
-  private String id = null;
   private String name = null;
+  private String id = null;
   private Artifact artifact = null;
   private Resource resource = null;
   private String launchCommand = null;
@@ -63,25 +63,7 @@ public class Application extends BaseResource {
   private List containers = new ArrayList<>();
   private ApplicationState state = null;
   private Map quicklinks = null;
-  private String queue;
-
-  /**
-   * A unique application id.
-   **/
-  public Application id(String id) {
-this.id = id;
-return this;
-  }
-
-  @ApiModelProperty(example = "null", required = true, value = "A unique 
application id.")
-  @JsonProperty("id")
-  public String getId() {
-return id;
-  }
-
-  public void setId(String id) {
-this.id = id;
-  }
+  private String queue = null;
 
   /**
* A unique application name.
@@ -102,6 +84,24 @@ public class Application extends BaseResource {
   }
 
   /**
+   * A unique application id.
+   **/
+  public Application id(String id) {
+this.id = id;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "A unique application id.")
+  @JsonProperty("id")
+  public String getId() {
+return id;
+  }
+
+  public void setId(String id) {
+this.id = id;
+  }
+
+  /**
* Artifact of single-component applications. Mandatory if components
* attribute is not specified.
**/
@@ -423,8 +423,8 @@ public class Application extends BaseResource {
 sb.append("numberOfRunningContainers: ")
 .append(toIndentedString(numberOfRunningContainers)).append("\n");
 sb.append("lifetime: 
").append(toIndentedString(lifetime)).append("\n");
-sb.append("placementPolicy: ")
-.append(toIndentedString(placementPolicy)).append("\n");
+sb.append("placementPolicy: 
").append(toIndentedString(placementPolicy))
+.append("\n");
 sb.append("components: ").append(toIndentedString(components))
 .append("\n");
 sb.append("configuration: ").append(toIndentedString(configuration))

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebbf8868/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ApplicationState.java
 

[49/66] [abbrv] hadoop git commit: YARN-5770. Performance improvement of native-services REST API service. Contributed by Gour Saha

2016-11-23 Thread jianhe
YARN-5770. Performance improvement of native-services REST API service. 
Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8610dcac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8610dcac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8610dcac

Branch: refs/heads/yarn-native-services
Commit: 8610dcacf7cf82c068482db01cfd8d66c45b37a8
Parents: 834e182
Author: Billie Rinaldi 
Authored: Wed Oct 26 08:34:39 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../api/impl/ApplicationApiService.java | 144 +--
 .../yarn/services/resource/Application.java |   7 +-
 .../yarn/services/resource/Container.java   |   4 +-
 .../services/webapp/ApplicationApiWebApp.java   |  12 +-
 .../org/apache/slider/client/SliderClient.java  |  25 ++--
 .../apache/slider/client/SliderClientAPI.java   |  11 ++
 6 files changed, 107 insertions(+), 96 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8610dcac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index 73df4a1..cf43ac2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -20,8 +20,7 @@ package org.apache.hadoop.yarn.services.api.impl;
 import static org.apache.hadoop.yarn.services.utils.RestApiConstants.*;
 import static org.apache.hadoop.yarn.services.utils.RestApiErrorMessages.*;
 
-import java.io.File;
-import java.io.FileReader;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.security.PrivilegedExceptionAction;
@@ -36,7 +35,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.regex.Pattern;
 
-import javax.inject.Singleton;
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
@@ -52,6 +50,7 @@ import javax.ws.rs.core.Response.Status;
 
 import org.apache.commons.lang.SerializationUtils;
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -78,7 +77,6 @@ import org.apache.slider.common.params.ActionFlexArgs;
 import org.apache.slider.common.params.ActionFreezeArgs;
 import org.apache.slider.common.params.ActionListArgs;
 import org.apache.slider.common.params.ActionRegistryArgs;
-import org.apache.slider.common.params.ActionStatusArgs;
 import org.apache.slider.common.params.ActionThawArgs;
 import org.apache.slider.common.params.ComponentArgsDelegate;
 import org.apache.slider.common.tools.SliderUtils;
@@ -98,6 +96,7 @@ import com.google.gson.JsonElement;
 import com.google.gson.JsonNull;
 import com.google.gson.JsonObject;
 import com.google.gson.JsonParser;
+import com.google.inject.Singleton;
 
 @Singleton
 @Path(APPLICATIONS_API_RESOURCE_PATH)
@@ -109,6 +108,11 @@ public class ApplicationApiService implements 
ApplicationApi {
   private static org.apache.hadoop.conf.Configuration SLIDER_CONFIG;
   private static UserGroupInformation SLIDER_USER;
   private static SliderClient SLIDER_CLIENT;
+  private static Response SLIDER_VERSION;
+  private static final JsonParser JSON_PARSER = new JsonParser();
+  private static final JsonObject EMPTY_JSON_OBJECT = new JsonObject();
+  private static final ActionListArgs ACTION_LIST_ARGS = new ActionListArgs();
+  private static final ActionFreezeArgs ACTION_FREEZE_ARGS = new 
ActionFreezeArgs();
 
   static {
 init();
@@ -119,24 +123,27 @@ public class ApplicationApiService implements 
ApplicationApi {
 SLIDER_CONFIG = getSliderClientConfiguration();
 SLIDER_USER = getSliderUser();
 SLIDER_CLIENT = createSliderClient();
+SLIDER_VERSION = initSliderVersion();
   }
 
   @GET
-  @Path("/slider-version")
+  @Path("/versions/slider-version")
   @Consumes({ MediaType.APPLICATION_JSON })
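The rest of this hunk is truncated by the archive. The gist of the change is visible above: the configuration, user, client, slider version and reusable argument objects are now built once in static fields rather than per request. A generic sketch of that caching pattern follows; the names are placeholders, not the slider classes.

// Generic sketch of the caching pattern this patch applies: expensive objects
// are created once in a static initializer and reused across REST calls
// instead of being rebuilt per request.
public class CachedServiceSketch {
  private static final ExpensiveClient CLIENT;
  private static final String VERSION;

  static {
    CLIENT = new ExpensiveClient();   // built once at class-load time
    VERSION = CLIENT.fetchVersion();  // precomputed, served from the cache
  }

  public String getVersion() {
    return VERSION;                   // no per-request work
  }

  // Placeholder for the costly dependency (e.g. a SliderClient).
  static class ExpensiveClient {
    String fetchVersion() {
      return "1.0";
    }
  }
}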
   

[51/66] [abbrv] hadoop git commit: YARN-5775. Convert enums in swagger definition to uppercase. Contributed by Gour Saha

2016-11-23 Thread jianhe
YARN-5775. Convert enums in swagger definition to uppercase. Contributed by 
Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b02a2f6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b02a2f6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b02a2f6d

Branch: refs/heads/yarn-native-services
Commit: b02a2f6d097a7040f6a9336c401fc6d330d8772f
Parents: 3dff896
Author: Billie Rinaldi 
Authored: Tue Oct 25 11:25:51 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml | 38 ++--
 1 file changed, 19 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b02a2f6d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
index 6169fcd..7eb3196 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -241,10 +241,10 @@ definitions:
 type: string
 description: Artifact type, like docker, tarball, etc. (optional).
 enum:
-  - docker
-  - tarball
-  - application
-default: docker
+  - DOCKER
+  - TARBALL
+  - APPLICATION
+default: DOCKER
   uri:
 type: string
 description: Artifact location to support multiple artifact stores 
(optional).
@@ -303,7 +303,7 @@ definitions:
 type: string
 description: E.g. HTTP (YARN will perform a simple REST call at a 
regular interval and expect a 204 No content).
 enum:
-  - http
+  - HTTP
   uri:
 type: string
 description: Fully qualified REST uri endpoint.
@@ -335,13 +335,13 @@ definitions:
 type: string
 description: Config file in the standard format like xml, properties, 
json, yaml, template.
 enum:
-  - xml
-  - properties
-  - json
-  - yaml
-  - template
-  - env
-  - hadoop_xml
+  - XML
+  - PROPERTIES
+  - JSON
+  - YAML
+  - TEMPLATE
+  - ENV
+  - HADOOP_XML
   dest_file:
 type: string
 description: The absolute path that this configuration file should be 
mounted as, in the application container.
@@ -386,11 +386,11 @@ definitions:
 type: string
 description: enum of the state of the application
 enum:
-  - accepted
-  - started
-  - ready
-  - stopped
-  - failed
+  - ACCEPTED
+  - STARTED
+  - READY
+  - STOPPED
+  - FAILED
   ContainerState:
 description: The current state of the container of an application.
 properties:
@@ -398,8 +398,8 @@ definitions:
 type: string
 description: enum of the state of the container
 enum:
-  - init
-  - ready
+  - INIT
+  - READY
   ApplicationStatus:
 description: The current status of a submitted application, returned as a 
response to the GET API.
 properties:





[31/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
new file mode 100644
index 000..30f6ba9
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClientAPI.java
@@ -0,0 +1,368 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.client;
+
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.slider.api.types.NodeInformationList;
+import org.apache.slider.api.types.SliderInstanceDescription;
+import org.apache.slider.common.params.AbstractClusterBuildingActionArgs;
+import org.apache.slider.common.params.ActionAMSuicideArgs;
+import org.apache.slider.common.params.ActionClientArgs;
+import org.apache.slider.common.params.ActionDependencyArgs;
+import org.apache.slider.common.params.ActionDestroyArgs;
+import org.apache.slider.common.params.ActionDiagnosticArgs;
+import org.apache.slider.common.params.ActionEchoArgs;
+import org.apache.slider.common.params.ActionFlexArgs;
+import org.apache.slider.common.params.ActionFreezeArgs;
+import org.apache.slider.common.params.ActionInstallKeytabArgs;
+import org.apache.slider.common.params.ActionInstallPackageArgs;
+import org.apache.slider.common.params.ActionKeytabArgs;
+import org.apache.slider.common.params.ActionNodesArgs;
+import org.apache.slider.common.params.ActionPackageArgs;
+import org.apache.slider.common.params.ActionKillContainerArgs;
+import org.apache.slider.common.params.ActionListArgs;
+import org.apache.slider.common.params.ActionRegistryArgs;
+import org.apache.slider.common.params.ActionResolveArgs;
+import org.apache.slider.common.params.ActionResourceArgs;
+import org.apache.slider.common.params.ActionStatusArgs;
+import org.apache.slider.common.params.ActionThawArgs;
+import org.apache.slider.common.params.ActionUpgradeArgs;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.providers.AbstractClientProvider;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Interface of those method calls in the slider API that are intended
+ * for direct public invocation.
+ * 
+ * Stability: evolving
+ */
+public interface SliderClientAPI extends Service {
+  /**
+   * Destroy a cluster. There are two race conditions here:
+   * #1 the cluster is started between verifying that there are no live
+   * clusters of that name.
+   */
+  int actionDestroy(String clustername, ActionDestroyArgs destroyArgs)
+  throws YarnException, IOException;
+
+  int actionDestroy(String clustername) throws YarnException,
+  IOException;
+
+  /**
+   * AM to commit an asynchronous suicide
+   */
+  int actionAmSuicide(String clustername,
+  ActionAMSuicideArgs args) throws YarnException, IOException;
+
+  /**
+   * Get the provider for this cluster
+   * @param provider the name of the provider
+   * @return the provider instance
+   * @throws SliderException problems building the provider
+   */
+  AbstractClientProvider createClientProvider(String provider)
+throws SliderException;
+
+  /**
+   * Build up the cluster specification/directory
+   *
+   * @param clustername cluster name
+   * @param buildInfo the arguments needed to build the cluster
+   * @throws YarnException Yarn problems
+   * @throws IOException other 
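The javadoc above is truncated by the archive. As a quick orientation, here is a hedged sketch of how a caller might drive the interface, using only the actionDestroy(String) overload declared earlier; it assumes an already started SliderClientAPI implementation is supplied.

import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.slider.client.SliderClientAPI;

// Sketch of a caller driving the interface above.
public final class SliderApiSketch {
  private SliderApiSketch() {
  }

  // Destroy a cluster by name, returning the exit code the API defines.
  public static int destroy(SliderClientAPI client, String clusterName)
      throws YarnException, IOException {
    return client.actionDestroy(clusterName);  // one-argument overload above
  }
}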

[10/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
new file mode 100644
index 000..a8aa1a2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppStateBindingInfo.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.state;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.providers.ProviderRole;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Binding information for application states; designed to be extensible
+ * so that tests don't have to be massively reworked when new arguments
+ * are added.
+ */
+public class AppStateBindingInfo {
+  public AggregateConf instanceDefinition;
+  public Configuration serviceConfig = new Configuration();
+  public Configuration publishedProviderConf = new Configuration(false);
+  public List<ProviderRole> roles = new ArrayList<>();
+  public FileSystem fs;
+  public Path historyPath;
+  public List<Container> liveContainers = new ArrayList<>(0);
+  public Map<String, String> applicationInfo = new HashMap<>();
+  public ContainerReleaseSelector releaseSelector = new 
SimpleReleaseSelector();
+  /** node reports off the RM. */
+  public List<NodeReport> nodeReports = new ArrayList<>(0);
+
+  public void validate() throws IllegalArgumentException {
+Preconditions.checkArgument(instanceDefinition != null, "null 
instanceDefinition");
+Preconditions.checkArgument(serviceConfig != null, "null appmasterConfig");
+Preconditions.checkArgument(publishedProviderConf != null, "null 
publishedProviderConf");
+Preconditions.checkArgument(releaseSelector != null, "null 
releaseSelector");
+Preconditions.checkArgument(roles != null, "null providerRoles");
+Preconditions.checkArgument(fs != null, "null fs");
+Preconditions.checkArgument(historyPath != null, "null historyDir");
+Preconditions.checkArgument(nodeReports != null, "null nodeReports");
+  }
+}
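Since AppStateBindingInfo is a plain binding struct guarded by validate(), a short usage sketch may help. The collaborators are assumed to come from the AppState bootstrap, so they are passed in rather than constructed here.

import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.slider.core.conf.AggregateConf;
import org.apache.slider.providers.ProviderRole;
import org.apache.slider.server.appmaster.state.AppStateBindingInfo;

// Sketch of wiring up the binding object above before handing it to AppState.
public final class BindingInfoSketch {
  private BindingInfoSketch() {
  }

  public static AppStateBindingInfo bind(AggregateConf instanceDefinition,
      FileSystem fs, Path historyPath, List<ProviderRole> roles) {
    AppStateBindingInfo binding = new AppStateBindingInfo();
    binding.instanceDefinition = instanceDefinition;
    binding.fs = fs;
    binding.historyPath = historyPath;
    binding.roles.addAll(roles);
    binding.validate();  // throws IllegalArgumentException if anything is missing
    return binding;
  }
}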

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerAllocationOutcome.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerAllocationOutcome.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerAllocationOutcome.java
new file mode 100644
index 000..5b3a93c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ContainerAllocationOutcome.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF 

[12/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/RpcBinder.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/RpcBinder.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/RpcBinder.java
new file mode 100644
index 000..dd4785d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/rpc/RpcBinder.java
@@ -0,0 +1,310 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.rpc;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.ProtocolProxy;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcEngine;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.slider.api.SliderClusterProtocol;
+import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.tools.Duration;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.ServiceNotReadyException;
+import org.apache.slider.core.exceptions.SliderException;
+
+import static org.apache.slider.common.SliderXmlConfKeys.*; 
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+
+public class RpcBinder {
+  protected static final Logger log =
+LoggerFactory.getLogger(RpcBinder.class);
+
+  /**
+   * Create a protobuf server bonded to the specific socket address
+   * @param addr address to listen to; 0.0.0.0 as hostname acceptable
+   * @param conf config
+   * @param secretManager token secret handler
+   * @param numHandlers threads to service requests
+   * @param blockingService service to handle
+   * @param portRangeConfig range of ports
+   * @return the IPC server itself
+   * @throws IOException
+   */
+  public static Server createProtobufServer(InetSocketAddress addr,
+Configuration conf,
+SecretManager<? extends TokenIdentifier> secretManager,
+int numHandlers,
+BlockingService blockingService,
+String portRangeConfig) throws
+  IOException {
+Class<?> sliderClusterAPIClass = registerSliderAPI(
+conf);
+RPC.Server server = new 
RPC.Builder(conf).setProtocol(sliderClusterAPIClass)
+ .setInstance(blockingService)
+   

[21/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/restclient/UgiJerseyBinding.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/restclient/UgiJerseyBinding.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/restclient/UgiJerseyBinding.java
new file mode 100644
index 000..bf71861
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/restclient/UgiJerseyBinding.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.restclient;
+
+import com.google.common.base.Preconditions;
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.UniformInterfaceException;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+import org.apache.hadoop.conf.Configuration;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.slider.core.exceptions.ExceptionConverter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.URL;
+
+/**
+ * Class to bond to a Jersey client, for UGI integration and SPNEGO.
+ * 
+ *   Usage: create an instance, then when creating a Jersey Client
+ *   pass in to the constructor the handler provided by {@link #getHandler()}
+ *
+ * see <a href="https://jersey.java.net/apidocs/1.17/jersey/com/sun/jersey/client/urlconnection/HttpURLConnectionFactory.html">Jersey docs</a>
+ */
+public class UgiJerseyBinding implements
+HttpURLConnectionFactory {
+  private static final Logger log =
+  LoggerFactory.getLogger(UgiJerseyBinding.class);
+
+  private final UrlConnectionOperations operations;
+  private final URLConnectionClientHandler handler;
+
+  /**
+   * Construct an instance
+   * @param operations operations instance
+   */
+  @SuppressWarnings("ThisEscapedInObjectConstruction")
+  public UgiJerseyBinding(UrlConnectionOperations operations) {
+Preconditions.checkArgument(operations != null, "Null operations");
+this.operations = operations;
+handler = new URLConnectionClientHandler(this);
+  }
+
+  /**
+   * Create an instance off the configuration. The SPNEGO policy
+   * is derived from the current UGI settings.
+   * @param conf config
+   */
+  public UgiJerseyBinding(Configuration conf) {
+this(new UrlConnectionOperations(conf));
+  }
+
+  /**
+   * Get a URL connection. 
+   * @param url URL to connect to
+   * @return the connection
+   * @throws IOException any problem. {@link AuthenticationException} 
+   * errors are wrapped
+   */
+  @Override
+  public HttpURLConnection getHttpURLConnection(URL url) throws IOException {
+try {
+  // open a connection handling status codes and so redirections
+  // but as it opens a connection, it's less useful than you think.
+
+  return operations.openConnection(url);
+} catch (AuthenticationException e) {
+  throw new IOException(e);
+}
+  }
+
+  public UrlConnectionOperations getOperations() {
+return operations;
+  }
+
+  public URLConnectionClientHandler getHandler() {
+return handler;
+  }
+  
+  /**
+   * Get the SPNEGO flag (as found in the operations instance).
+   * @return the spnego policy
+   */
+  public boolean isUseSpnego() {
+return operations.isUseSpnego();
+  }
+
+
+  /**
+   * Uprate error codes 400 and up into faults; 
+   * 
+   * see {@link ExceptionConverter#convertJerseyException(String, String, 
UniformInterfaceException)}
+   */
+  public static 
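The class is truncated here by the archive, but its javadoc already spells out the intended usage. A hedged sketch of that usage with the Jersey 1.x client:

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.config.ClientConfig;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import org.apache.hadoop.conf.Configuration;
import org.apache.slider.core.restclient.UgiJerseyBinding;

// Follows the usage spelled out in the class javadoc above: build the binding
// from the Hadoop configuration, then hand its connection handler to the
// Jersey 1.x Client so every request goes through the UGI/SPNEGO-aware opener.
public final class JerseyBindingSketch {
  private JerseyBindingSketch() {
  }

  public static Client createClient(Configuration conf) {
    UgiJerseyBinding binding = new UgiJerseyBinding(conf);
    ClientConfig clientConfig = new DefaultClientConfig();
    return new Client(binding.getHandler(), clientConfig);
  }
}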

[08/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
new file mode 100644
index 000..ad91183
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/StateAccessForProviders.java
@@ -0,0 +1,313 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.state;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.StatusKeys;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.api.types.ComponentInformation;
+import org.apache.slider.api.types.NodeInformation;
+import org.apache.slider.api.types.RoleStatistics;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.exceptions.NoSuchNodeException;
+import org.apache.slider.core.registry.docstore.PublishedConfigSet;
+import org.apache.slider.core.registry.docstore.PublishedExportsSet;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * The methods to offer state access to the providers and other parts of
+ * the system which want read-only access to the state.
+ */
+public interface StateAccessForProviders {
+
+  /**
+   * Get a map of role status entries by role Id
+   * @return the map of currently defined roles.
+   */
+  Map<Integer, RoleStatus> getRoleStatusMap();
+
+  /**
+   * Get the name of the application
+   * @return the name
+   */
+  String getApplicationName();
+
+  /**
+   * Get the published configurations
+   * @return the configuration set
+   */
+  PublishedConfigSet getPublishedSliderConfigurations();
+
+  /**
+   * Get the published exports set
+   * @return
+   */
+  PublishedExportsSet getPublishedExportsSet();
+
+  /**
+   * Get a named published config set
+   * @param name name to look up
+   * @return the instance or null
+   */
+  PublishedConfigSet getPublishedConfigSet(String name);
+
+  /**
+   * Get a named published config set, creating it if need be.
+   * @param name name to look up
+   * @return the instance -possibly a new one
+   */
+  PublishedConfigSet getOrCreatePublishedConfigSet(String name);
+
+  /**
+   * List the config sets -this takes a clone of the current set
+   * @return a list of config sets
+   */
+  List<String> listConfigSets();
+
+  /**
+   * Get a map of all the failed containers
+   * @return map of recorded failed containers
+   */
+  Map<ContainerId, RoleInstance> getFailedContainers();
+
+  /**
+   * Get the live containers.
+   * 
+   * @return the live nodes
+   */
+  Map<ContainerId, RoleInstance> getLiveContainers();
+
+  /**
+   * Get the current cluster description 
+   * @return the actual state of the cluster
+   */
+  ClusterDescription getClusterStatus();
+
+  /**
+   * Get at the snapshot of the resource config
+   * Changes here do not affect the application state.
+   * @return the most recent settings
+   */
+  ConfTreeOperations getResourcesSnapshot();
+
+  /**
+   * Get at the snapshot of the appconf config
+   * Changes here do not affect the application state.
+   * @return the most recent settings
+   */
+  ConfTreeOperations getAppConfSnapshot();
+
+  /**
+   * Get at the snapshot of the internals config.
+   * Changes here do not 
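The interface is truncated here by the archive. A small hedged sketch of how a provider might consume it, using only methods declared above; the amState instance is assumed to be handed to the provider by the application master.

import org.apache.slider.server.appmaster.state.StateAccessForProviders;

// Sketch of a provider reading application state through the read-only
// interface above.
public final class StateAccessSketch {
  private StateAccessSketch() {
  }

  public static void logSummary(StateAccessForProviders amState) {
    // Cheap reads exposed to providers without mutating AppState.
    System.out.println("application: " + amState.getApplicationName());
    System.out.println("live containers: " + amState.getLiveContainers().size());
  }
}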

[65/66] [abbrv] hadoop git commit: YARN-5701. Fix issues in yarn native services apps-of-apps. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
YARN-5701. Fix issues in yarn native services apps-of-apps. Contributed by 
Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebd63925
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebd63925
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebd63925

Branch: refs/heads/yarn-native-services
Commit: ebd639257e3cc1f33235612aadc7ee653141d663
Parents: 6bbd675
Author: Jian He 
Authored: Sun Oct 16 17:01:09 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../org/apache/slider/client/SliderClient.java  | 72 
 .../slider/core/buildutils/InstanceBuilder.java |  4 +
 .../apache/slider/providers/ProviderUtils.java  | 62 --
 .../providers/docker/DockerClientProvider.java  |  4 +-
 .../providers/docker/DockerProviderService.java | 87 
 5 files changed, 164 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebd63925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 2840c4b..94e51e5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -178,6 +178,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.BufferedReader;
+import java.io.Console;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
@@ -918,57 +919,56 @@ public class SliderClient extends 
AbstractSliderLaunchedService implements RunSe
   return;
 }
 
-BufferedReader br = null;
-try {
-  for (Entry<String, List<String>> cred : tree.credentials.entrySet()) {
-String provider = cred.getKey()
-.replaceAll(Pattern.quote("${CLUSTER_NAME}"), clusterName)
-.replaceAll(Pattern.quote("${CLUSTER}"), clusterName);
-List<String> aliases = cred.getValue();
-if (aliases == null || aliases.isEmpty()) {
-  continue;
-}
-Configuration c = new Configuration(conf);
-c.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, provider);
-CredentialProvider credentialProvider = 
CredentialProviderFactory.getProviders(c).get(0);
-Set<String> existingAliases = new 
HashSet<>(credentialProvider.getAliases());
-for (String alias : aliases) {
-  if (existingAliases.contains(alias.toLowerCase(Locale.ENGLISH))) {
-log.info("Credentials for " + alias + " found in " + provider);
-  } else {
-if (br == null) {
-  br = new BufferedReader(new InputStreamReader(System.in));
-}
-char[] pass = readPassword(alias, br);
-credentialProvider.createCredentialEntry(alias, pass);
-credentialProvider.flush();
-Arrays.fill(pass, ' ');
+Console console = System.console();
+for (Entry<String, List<String>> cred : tree.credentials.entrySet()) {
+  String provider = cred.getKey()
+  .replaceAll(Pattern.quote("${CLUSTER_NAME}"), clusterName)
+  .replaceAll(Pattern.quote("${CLUSTER}"), clusterName);
+  List<String> aliases = cred.getValue();
+  if (aliases == null || aliases.isEmpty()) {
+continue;
+  }
+  Configuration c = new Configuration(conf);
+  c.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, provider);
+  CredentialProvider credentialProvider = 
CredentialProviderFactory.getProviders(c).get(0);
+  Set<String> existingAliases = new 
HashSet<>(credentialProvider.getAliases());
+  for (String alias : aliases) {
+if (existingAliases.contains(alias.toLowerCase(Locale.ENGLISH))) {
+  log.info("Credentials for " + alias + " found in " + provider);
+} else {
+  if (console == null) {
+throw new IOException("Unable to input password for " + alias +
+" because System.console() is null; provider " + provider +
+" must be populated manually");
   }
+  char[] pass = readPassword(alias, console);
+  

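The hunk above replaces the BufferedReader prompt with System.console(); a standalone sketch of that pattern against the Hadoop credential provider API follows (the JCEKS path and alias are made up for illustration).

import java.io.Console;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;

public class ConsoleCredentialSample {
  public static void main(String[] args) throws IOException {
    Console console = System.console();
    if (console == null) {
      // Same failure mode the patch guards against: no interactive console attached.
      throw new IOException("System.console() is null; populate the provider manually");
    }
    Configuration conf = new Configuration();
    // Hypothetical local credential store; any provider URI works here.
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        "jceks://file/tmp/sample.jceks");
    CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
    char[] pass = console.readPassword("Password for alias %s: ", "sample.alias");
    provider.createCredentialEntry("sample.alias", pass);
    provider.flush();
    // Scrub the password from memory once it has been persisted.
    Arrays.fill(pass, ' ');
  }
}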
[28/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
new file mode 100644
index 000..5140059
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ComponentArgsDelegate.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.params;
+
+import com.beust.jcommander.Parameter;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class ComponentArgsDelegate extends AbstractArgsDelegate {
+
+  /**
+   * This is a listing of the roles to create
+   */
+  @Parameter(names = {ARG_COMPONENT,  ARG_COMPONENT_SHORT, ARG_ROLE},
+ arity = 2,
+ description = "--component   e.g. +1 incr by 1, -2 
decr by 2, and 3 makes final count 3",
+ splitter = DontSplitArguments.class)
+  public List<String> componentTuples = new ArrayList<>(0);
+
+
+  /**
+   * Get the role mapping (may be empty, but never null)
+   * @return role mapping
+   * @throws BadCommandArgumentsException parse problem
+   */
+  public Map<String, String> getComponentMap() throws BadCommandArgumentsException {
+return convertTupleListToMap("component", componentTuples);
+  }
+
+  public List<String> getComponentTuples() {
+return componentTuples;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/DontSplitArguments.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/DontSplitArguments.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/DontSplitArguments.java
new file mode 100644
index 000..0344305
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/DontSplitArguments.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.params;
+
+import com.beust.jcommander.converters.IParameterSplitter;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class DontSplitArguments implements IParameterSplitter {
+
+  @Override
+  public List<String> split(String value) {
+List<String> list = new ArrayList<>(1);
+list.add(value);
+return list;
+  }
+}

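A short usage sketch for the two classes above, assuming JCommander on the classpath; the component name and count are invented, and the map conversion is done by the inherited convertTupleListToMap helper.

import com.beust.jcommander.JCommander;
import org.apache.slider.common.params.ComponentArgsDelegate;

public class ComponentArgsSample {
  public static void main(String[] args) throws Exception {
    ComponentArgsDelegate delegate = new ComponentArgsDelegate();
    // arity = 2 consumes two tokens per --component, and DontSplitArguments keeps
    // each token whole instead of re-splitting it on commas.
    new JCommander(delegate).parse("--component", "HBASE_MASTER", "2");
    System.out.println(delegate.getComponentTuples()); // e.g. [HBASE_MASTER, 2]
    System.out.println(delegate.getComponentMap());    // e.g. {HBASE_MASTER=2}
  }
}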

[50/66] [abbrv] hadoop git commit: YARN-5735. Make the service REST API use the app timeout feature YARN-4205. Contributed by Jian He

2016-11-23 Thread jianhe
YARN-5735. Make the service REST API use the app timeout feature YARN-4205. 
Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bbd6755
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bbd6755
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bbd6755

Branch: refs/heads/yarn-native-services
Commit: 6bbd6755c2e1821b21d179561cb109e1c62c4637
Parents: 913b242
Author: Gour Saha 
Authored: Fri Oct 14 17:40:51 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../api/impl/ApplicationApiService.java | 10 --
 .../org/apache/slider/client/SliderClient.java  | 33 ++--
 .../AbstractClusterBuildingActionArgs.java  |  5 +++
 .../slider/common/params/ActionThawArgs.java|  6 
 .../apache/slider/common/params/Arguments.java  |  1 +
 5 files changed, 36 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bbd6755/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index 21cf113..73df4a1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -347,7 +347,7 @@ public class ApplicationApiService implements 
ApplicationApi {
 if (queueName != null && queueName.trim().length() > 0) {
   createArgs.queue = queueName.trim();
 }
-
+createArgs.lifetime = application.getLifetime();
 return invokeSliderClientRunnable(new 
SliderClientContextRunnable() {
   @Override
   public String run(SliderClient sliderClient) throws YarnException,
@@ -1246,13 +1246,17 @@ public class ApplicationApiService implements 
ApplicationApi {
 });
   }
 
-  private Response startSliderApplication(final String appName)
+  private Response startSliderApplication(final String appName, Application 
app)
   throws IOException, YarnException, InterruptedException {
 return invokeSliderClientRunnable(new 
SliderClientContextRunnable() {
   @Override
   public Response run(SliderClient sliderClient) throws YarnException,
   IOException, InterruptedException {
 ActionThawArgs thawArgs = new ActionThawArgs();
+if (app.getLifetime() == null) {
+  app.setLifetime(DEFAULT_UNLIMITED_LIFETIME);
+}
+thawArgs.lifetime = app.getLifetime();
 int returnCode = sliderClient.actionThaw(appName, thawArgs);
 if (returnCode == 0) {
   logger.info("Successfully started application {}", appName);
@@ -1344,7 +1348,7 @@ public class ApplicationApiService implements 
ApplicationApi {
   try {
 int livenessCheck = getSliderList(appName);
 if (livenessCheck != 0) {
-  return startSliderApplication(appName);
+  return startSliderApplication(appName, updateAppData);
 } else {
   logger.info("Application {} is already running", appName);
   ApplicationStatus applicationStatus = new ApplicationStatus();

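The anonymous SliderClientContextRunnable blocks above follow a small generic callback pattern; the self-contained sketch below mimics the idea with invented names rather than the project's own types.

public class ContextRunnableSample {
  // Stand-in for the real callback interface, which receives a SliderClient.
  interface ContextRunnable<T> {
    T run(String context) throws Exception;
  }

  // Stand-in for invokeSliderClientRunnable: set up the context, then delegate.
  static <T> T invoke(ContextRunnable<T> runnable) throws Exception {
    return runnable.run("sample-context");
  }

  public static void main(String[] args) throws Exception {
    // The return type is pinned by the anonymous class, just as in the patch
    // (String for create, Response for start).
    String result = invoke(new ContextRunnable<String>() {
      @Override
      public String run(String context) {
        return "started with " + context;
      }
    });
    System.out.println(result);
  }
}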
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bbd6755/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index fe4f1d2..2840c4b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.util.Shell;
 

[09/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
new file mode 100644
index 000..37e9a7f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/ProviderAppState.java
@@ -0,0 +1,307 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.state;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.api.types.ComponentInformation;
+import org.apache.slider.api.types.NodeInformation;
+import org.apache.slider.api.types.RoleStatistics;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.exceptions.NoSuchNodeException;
+import org.apache.slider.core.registry.docstore.PublishedConfigSet;
+import org.apache.slider.core.registry.docstore.PublishedExportsSet;
+import org.apache.slider.server.appmaster.web.rest.RestPaths;
+import org.apache.slider.server.services.utility.PatternValidator;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Implementation of {@link StateAccessForProviders}, which means
+ * state access for providers, web UI and IPC/REST views.
+ */
+public class ProviderAppState implements StateAccessForProviders {
+
+
+  private final Map<String, PublishedConfigSet> publishedConfigSets =
+  new ConcurrentHashMap<>(5);
+  private final PublishedExportsSet publishedExportsSets = new 
PublishedExportsSet();
+  private static final PatternValidator validator = new PatternValidator(
+  RestPaths.PUBLISHED_CONFIGURATION_SET_REGEXP);
+  private String applicationName;
+
+  private final AppState appState;
+
+  public ProviderAppState(String applicationName, AppState appState) {
+this.appState = appState;
+this.applicationName = applicationName;
+  }
+
+  public void setApplicationName(String applicationName) {
+this.applicationName = applicationName;
+  }
+
+  @Override
+  public String getApplicationName() {
+return applicationName;
+  }
+
+  @Override
+  public PublishedConfigSet getPublishedSliderConfigurations() {
+return getOrCreatePublishedConfigSet(RestPaths.SLIDER_CONFIGSET);
+  }
+
+  @Override
+  public PublishedExportsSet getPublishedExportsSet() {
+return publishedExportsSets;
+  }
+
+  @Override
+  public PublishedConfigSet getPublishedConfigSet(String name) {
+return publishedConfigSets.get(name);
+  }
+
+  @Override
+  public PublishedConfigSet getOrCreatePublishedConfigSet(String name) {
+PublishedConfigSet set = publishedConfigSets.get(name);
+if (set == null) {
+  validator.validate(name);
+  synchronized (publishedConfigSets) {
+// synchronized double check to ensure that there is never an overridden
+// config set created
+set = publishedConfigSets.get(name);
+if (set == null) {
+  set = new PublishedConfigSet();
+  publishedConfigSets.put(name, set);
+}
+  }
+}
+return set;
+  }
+
+  @Override
+  public List<String> listConfigSets() {
+
+synchronized (publishedConfigSets) {
+  List sets = new 

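getOrCreatePublishedConfigSet above uses a synchronized double-check over a ConcurrentHashMap; the plain-JDK sketch below shows the equivalent putIfAbsent formulation, with StringBuilder standing in for the config-set type.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class GetOrCreateSample {
  private final ConcurrentMap<String, StringBuilder> sets = new ConcurrentHashMap<>();

  // Same contract: at most one instance per name, even when two threads race
  // on the first lookup.
  StringBuilder getOrCreate(String name) {
    StringBuilder set = sets.get(name);
    if (set == null) {
      StringBuilder candidate = new StringBuilder(name);
      // putIfAbsent returns the existing value if another thread won the race.
      StringBuilder winner = sets.putIfAbsent(name, candidate);
      set = (winner == null) ? candidate : winner;
    }
    return set;
  }

  public static void main(String[] args) {
    GetOrCreateSample sample = new GetOrCreateSample();
    System.out.println(sample.getOrCreate("demo") == sample.getOrCreate("demo")); // true
  }
}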
[32/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
new file mode 100644
index 000..8210f4d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -0,0 +1,4569 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+*/
+
+package org.apache.slider.client;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.io.Files;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathNotFoundException;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.registry.client.exceptions.NoRecordException;
+import org.apache.hadoop.registry.client.types.Endpoint;
+import org.apache.hadoop.registry.client.types.RegistryPathStatus;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.KerberosDiags;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.alias.CredentialProvider;
+import org.apache.hadoop.security.alias.CredentialProviderFactory;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.SliderApplicationApi;
+import org.apache.slider.api.SliderClusterProtocol;
+import org.apache.slider.api.StateValues;
+import org.apache.slider.api.proto.Messages;
+import org.apache.slider.api.types.ContainerInformation;
+import org.apache.slider.api.types.NodeInformationList;
+import org.apache.slider.api.types.SliderInstanceDescription;
+import org.apache.slider.client.ipc.SliderApplicationIpcClient;
+import org.apache.slider.client.ipc.SliderClusterOperations;
+import org.apache.slider.common.Constants;
+import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.SliderKeys;

[42/66] [abbrv] hadoop git commit: YARN-5513. Move Java only tests from slider develop to yarn-native-services. Contributed by Gour Saha

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a89750/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/ContractTestUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/ContractTestUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/ContractTestUtils.java
new file mode 100644
index 000..7eaaefe
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/test/ContractTestUtils.java
@@ -0,0 +1,901 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.test;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.internal.AssumptionViolatedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.Properties;
+import java.util.UUID;
+
+/**
+ * Utilities used across test cases to make assertions about filesystems
+ * -assertions which fail with useful information.
+ * This is lifted from Hadoop common Test; that JAR isn't published, so
+ * we have to make do.
+ */
+public class ContractTestUtils extends Assert {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ContractTestUtils.class);
+
+  public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
+
+  // For scale testing, we can repeatedly write small chunk data to generate
+  // a large file.
+  public static final String IO_CHUNK_BUFFER_SIZE = "io.chunk.buffer.size";
+  public static final int DEFAULT_IO_CHUNK_BUFFER_SIZE = 128;
+  public static final String IO_CHUNK_MODULUS_SIZE = "io.chunk.modulus.size";
+  public static final int DEFAULT_IO_CHUNK_MODULUS_SIZE = 128;
+
+  /**
+   * Assert that a property in the property set matches the expected value
+   * @param props property set
+   * @param key property name
+   * @param expected expected value. If null, the property must not be in the 
set
+   */
+  public static void assertPropertyEquals(Properties props,
+  String key,
+  String expected) {
+String val = props.getProperty(key);
+if (expected == null) {
+  assertNull("Non null property " + key + " = " + val, val);
+} else {
+  assertEquals("property " + key + " = " + val,
+  expected,
+  val);
+}
+  }
+
+  /**
+   *
+   * Write a file and read it in, validating the result. Optional flags control
+   * whether file overwrite operations should be enabled, and whether the
+   * file should be deleted afterwards.
+   *
+   * If there is a mismatch between what was written and what was expected,
+   * a small range of bytes either side of the first error are logged to aid
+   * diagnosing what problem occurred -whether it was a previous file
+   * or a corrupting of the current file. This assumes that two
+   * sequential runs to the same path use datasets with different character
+   * moduli.
+   *
+   * @param fs filesystem
+   * @param path path to write to
+   * @param len length of data
+   * @param overwrite should the create option allow overwrites?
+   * @param delete should the file be deleted afterwards? -with a verification
+   * that it worked. Deletion is not attempted if an assertion has failed
+   * earlier -it is not in a finally{} block.
+   * @throws IOException IO problems
+   */
+  public static void writeAndRead(FileSystem fs,
+  Path path,
+   

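A small usage sketch for assertPropertyEquals above; the property keys mirror the constants in the class and the values are invented.

import java.util.Properties;
import org.apache.slider.test.ContractTestUtils;
import org.junit.Test;

public class ContractTestUtilsSample {
  @Test
  public void testPropertyAssertions() {
    Properties props = new Properties();
    props.setProperty("io.file.buffer.size", "4096");
    // Passes: the key is present with the expected value.
    ContractTestUtils.assertPropertyEquals(props, "io.file.buffer.size", "4096");
    // Passes: a null expectation asserts that the key is absent.
    ContractTestUtils.assertPropertyEquals(props, "io.chunk.buffer.size", null);
  }
}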
[13/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/management/MetricsAndMonitoring.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/management/MetricsAndMonitoring.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/management/MetricsAndMonitoring.java
new file mode 100644
index 000..37a8935
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/management/MetricsAndMonitoring.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.management;
+
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.MetricSet;
+import com.codahale.metrics.health.HealthCheckRegistry;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.CompositeService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * Class for all metrics and monitoring
+ */
+public class MetricsAndMonitoring extends CompositeService {
+  protected static final Logger log =
+LoggerFactory.getLogger(MetricsAndMonitoring.class);
+  public MetricsAndMonitoring(String name) {
+super(name);
+  }
+  
+  public MetricsAndMonitoring() {
+super("MetricsAndMonitoring");
+  }
+  
+  /**
+   * Singleton of metrics registry
+   */
+  final MetricRegistry metrics = new MetricRegistry();
+
+  final HealthCheckRegistry health = new HealthCheckRegistry();
+
+  private final Map<String, MeterAndCounter> meterAndCounterMap
+  = new ConcurrentHashMap<>();
+
+  private final List<MetricSet> metricSets = new ArrayList<>();
+
+  /**
+   * List of recorded events
+   */
+  private final List eventHistory = new ArrayList<>(100);
+
+  public static final int EVENT_LIMIT = 1000;
+
+  public MetricRegistry getMetrics() {
+return metrics;
+  }
+
+  public HealthCheckRegistry getHealth() {
+return health;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+addService(new MetricsBindingService("MetricsBindingService",
+metrics));
+super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+super.serviceStop();
+for (MetricSet set : metricSets) {
+  unregister(set);
+}
+  }
+
+  public MeterAndCounter getMeterAndCounter(String name) {
+return meterAndCounterMap.get(name);
+  }
+
+  /**
+   * Get or create the meter/counter pair
+   * @param name name of instance
+   * @return an instance
+   */
+  public MeterAndCounter getOrCreateMeterAndCounter(String name) {
+MeterAndCounter instance = meterAndCounterMap.get(name);
+if (instance == null) {
+  synchronized (this) {
+// check in a sync block
+instance = meterAndCounterMap.get(name);
+if (instance == null) {
+  instance = new MeterAndCounter(metrics, name);
+  meterAndCounterMap.put(name, instance);
+}
+  }
+}
+return instance;
+  }
+
+  /**
+   * Get a specific meter and mark it. This will create and register it on 
demand.
+   * @param name name of meter/counter
+   */
+  public void markMeterAndCounter(String name) {
+MeterAndCounter meter = getOrCreateMeterAndCounter(name);
+meter.mark();
+  }
+
+  /**
+   * Given a {@link Metric}, registers it under the given name.
+   *
+   * @param name   the name of the metric
+   * @param metric the metric
+   * @param <T> the type of the metric
+   * @return {@code metric}
+   * @throws IllegalArgumentException if the name is already registered
+   */
+  public <T extends Metric> T register(String name, 

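The service above wraps a Codahale MetricRegistry and HealthCheckRegistry; the sketch below exercises the underlying registry calls directly, with made-up metric names.

import com.codahale.metrics.Counter;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;

public class MetricsRegistrySample {
  public static void main(String[] args) {
    MetricRegistry metrics = new MetricRegistry();
    // markMeterAndCounter above bumps a meter and a counter per name; this is the
    // raw registry equivalent of recording one event.
    Meter meter = metrics.meter("sample.requests.meter");
    Counter counter = metrics.counter("sample.requests.counter");
    meter.mark();
    counter.inc();
    System.out.println("count=" + counter.getCount() + " rate=" + meter.getMeanRate());
  }
}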
[17/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Component.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Component.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Component.java
new file mode 100644
index 000..78bb8c1
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/application/metadata/Component.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.agent.application.metadata;
+
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.SliderException;
+import org.codehaus.jackson.annotate.JsonProperty;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Component defined in master package metainfo.json
+ */
+public class Component extends AbstractComponent {
+
+  String category = CATEGORY_MASTER;
+  String publishConfig = Boolean.FALSE.toString();
+  String minInstanceCount = "0";
+  String maxInstanceCount;
+  String autoStartOnFailure = Boolean.FALSE.toString();
+  String appExports;
+  String compExports;
+  String type = TYPE_STANDARD;
+  List<ComponentExport> componentExports = new ArrayList<>();
+  List<DockerContainer> dockerContainers = new ArrayList<>();
+  List<ConfigFile> configFiles = new ArrayList<>();
+
+  public Component() {
+  }
+
+  public String getType() {
+return type;
+  }
+
+  public void setType(String type) {
+this.type = type;
+  }
+
+  public String getCategory() {
+return category;
+  }
+
+  public void setCategory(String category) {
+this.category = category;
+  }
+
+  public String getPublishConfig() {
+return publishConfig;
+  }
+
+  public void setPublishConfig(String publishConfig) {
+this.publishConfig = publishConfig;
+  }
+
+  public String getAutoStartOnFailure() {
+return autoStartOnFailure;
+  }
+
+  public void setAutoStartOnFailure(String autoStartOnFailure) {
+this.autoStartOnFailure = autoStartOnFailure;
+  }
+
+  public String getAppExports() {
+return appExports;
+  }
+
+  public void setAppExports(String appExports) {
+this.appExports = appExports;
+  }
+
+  public String getCompExports() {
+return compExports;
+  }
+
+  public void setCompExports(String compExports) {
+this.compExports = compExports;
+  }
+
+  public String getMinInstanceCount() {
+return minInstanceCount;
+  }
+  
+  @JsonProperty("dockerContainers")
+  public List<DockerContainer> getDockerContainers() {
+ return this.dockerContainers;
+  }
+  
+  public Boolean getAutoStartOnFailureBoolean() {
+if (SliderUtils.isUnset(getAutoStartOnFailure())) {
+  return Boolean.FALSE;
+}
+
+return Boolean.parseBoolean(getAutoStartOnFailure());
+  }
+
+  public int getMinInstanceCountInt() throws BadConfigException {
+if (SliderUtils.isUnset(minInstanceCount)) {
+  return 0;
+}
+
+try {
+  return Integer.parseInt(minInstanceCount);
+} catch (NumberFormatException nfe) {
+  throw new BadConfigException(nfe, "Invalid value for minInstanceCount 
for %s", name);
+}
+  }
+
+  public int getMaxInstanceCountInt() throws BadConfigException {
+if (SliderUtils.isUnset(maxInstanceCount)) {
+  return Integer.MAX_VALUE;
+}
+
+try {
+  return Integer.parseInt(maxInstanceCount);
+} catch (NumberFormatException nfe) {
+  throw new BadConfigException(nfe, "Invalid value for maxInstanceCount 
for %s", name);
+}
+  }
+
+  public void setMinInstanceCount(String minInstanceCount) {
+this.minInstanceCount = minInstanceCount;
+  }
+
+  public String getMaxInstanceCount() {
+return 

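The instance counts above are carried as strings and parsed lazily; a short sketch using only the setters and parsers shown in this hunk, with invented values:

import org.apache.slider.providers.agent.application.metadata.Component;

public class ComponentCountSample {
  public static void main(String[] args) throws Exception {
    Component component = new Component();
    component.setCategory("MASTER");
    component.setMinInstanceCount("1");
    // maxInstanceCount is left unset, so the parsed value falls back to Integer.MAX_VALUE.
    System.out.println(component.getMinInstanceCountInt()); // 1
    System.out.println(component.getMaxInstanceCountInt()); // 2147483647
  }
}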
[22/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/YarnAppListClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/YarnAppListClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/YarnAppListClient.java
new file mode 100644
index 000..1bdfb9c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/YarnAppListClient.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.registry;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.slider.client.SliderYarnClientImpl;
+import org.apache.slider.api.types.SliderInstanceDescription;
+import org.apache.slider.common.tools.CoreFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Client code for interacting with a list of service instances.
+ * The initial logic just enumerates service instances in the YARN RM
+ */
+public class YarnAppListClient {
+
+  private final SliderYarnClientImpl yarnClient;
+  private final String username;
+  private final Configuration conf;
+  private static final Logger log =
+  LoggerFactory.getLogger(YarnAppListClient.class);
+
+  public YarnAppListClient(SliderYarnClientImpl yarnClient,
+  String username,
+  Configuration conf) {
+
+Preconditions.checkArgument(yarnClient != null,
+"yarn client is null: is app inited?");
+Preconditions.checkArgument(username != null,
+"username is null");
+Preconditions.checkArgument(conf != null,
+"conf parameter is null");
+this.yarnClient = yarnClient;
+this.username = username;
+this.conf = conf;
+  }
+
+  /**
+   * find all live instances of a specific app -if there is more than one 
+   * in the cluster, this returns them all. State should be running or earlier
+   * in the lifecycle
+   * @param appname application name
+   * @return the list of all matching application instances
+   */
+  public List<ApplicationReport> findAllLiveInstances(String appname)
+throws YarnException, IOException {
+return yarnClient.findAllLiveInstances(username, appname);
+  }
+
+
+  /**
+   * Find an instance of a application belong to the current user
+   * @param appname application name
+   * @return the app report or null if none is found
+   * @throws YarnException YARN issues
+   * @throws IOException IO problems
+   */
+  public ApplicationReport findInstance(String appname) throws
+YarnException,
+IOException {
+List<ApplicationReport> instances = listInstances(null);
+return yarnClient.findClusterInInstanceList(instances, appname);
+  }
+
+  /**
+   * List instances belonging to the specific user
+   * @return a possibly empty list of AMs
+   */
+  public List<ApplicationReport> listInstances()
+  throws YarnException, IOException {
+return listInstances(null);
+  }
+
+  /**
+   * List instances belonging to a specific user
+   * @return a possibly empty list of AMs
+   * @param user user if not the default. null means default, "" means all 
users, 
+   * otherwise it is the name of a user
+   */
+  public List<ApplicationReport> listInstances(String user)
+  throws YarnException, IOException {
+String listUser = user == null ? username : user;
+

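A rough wiring sketch for the client above; the SliderYarnClientImpl lifecycle calls, the user name and the application name are assumptions for illustration, not part of this hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.slider.client.SliderYarnClientImpl;
import org.apache.slider.core.registry.YarnAppListClient;

public class AppListSample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    // Assumed YARN service lifecycle: init and start before use, stop afterwards.
    SliderYarnClientImpl yarnClient = new SliderYarnClientImpl();
    yarnClient.init(conf);
    yarnClient.start();
    try {
      YarnAppListClient listClient = new YarnAppListClient(yarnClient, "hadoop", conf);
      // findInstance returns null when no instance of that name belongs to the user.
      ApplicationReport report = listClient.findInstance("sample-app");
      System.out.println(report == null ? "not found" : report.getApplicationId());
    } finally {
      yarnClient.stop();
    }
  }
}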
[43/66] [abbrv] hadoop git commit: YARN-5513. Move Java only tests from slider develop to yarn-native-services. Contributed by Gour Saha

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a89750/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestAgentProviderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestAgentProviderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestAgentProviderService.java
new file mode 100644
index 000..7fceac7
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestAgentProviderService.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.publisher;
+
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.providers.agent.AgentProviderService;
+import org.apache.slider.server.appmaster.actions.QueueAccess;
+import org.apache.slider.server.appmaster.state.StateAccessForProviders;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test provider that publishes sample configuration data for the publisher REST tests.
+ */
+public class TestAgentProviderService extends AgentProviderService {
+  protected static final Logger log =
+  LoggerFactory.getLogger(TestAgentProviderService.class);
+
+  public TestAgentProviderService() {
+super();
+log.info("TestAgentProviderService created");
+  }
+
+  @Override
+  public void bind(StateAccessForProviders stateAccessor,
+  QueueAccess queueAccess,
+  List<Container> liveContainers) {
+super.bind(stateAccessor, queueAccess, liveContainers);
+Map<String, String> dummyProps = new HashMap<String, String>();
+dummyProps.put("prop1", "val1");
+dummyProps.put("prop2", "val2");
+log.info("publishing dummy-site.xml with values {}", dummyProps);
+publishApplicationInstanceData("dummy-site", "dummy configuration",
+   dummyProps.entrySet());
+// publishing global config for testing purposes
+publishApplicationInstanceData("global", "global configuration",
+   stateAccessor.getAppConfSnapshot()
+   .getGlobalOptions().entrySet());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a89750/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestSliderProviderFactory.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestSliderProviderFactory.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestSliderProviderFactory.java
new file mode 100644
index 000..f49e15a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/test/java/org/apache/slider/server/appmaster/web/rest/publisher/TestSliderProviderFactory.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * 

[52/66] [abbrv] hadoop git commit: YARN-5680. Add 2 new fields in Slider status output - image-name and is-privileged-container. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
YARN-5680. Add 2 new fields in Slider status output - image-name and 
is-privileged-container. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/834e1821
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/834e1821
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/834e1821

Branch: refs/heads/yarn-native-services
Commit: 834e1821e044c74f07c33d9bf5fac1ef7f67df96
Parents: b02a2f6
Author: Gour Saha 
Authored: Tue Oct 25 20:00:27 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../org/apache/slider/providers/docker/DockerKeys.java  |  1 +
 .../slider/providers/docker/DockerProviderService.java  |  2 +-
 .../apache/slider/server/appmaster/state/AppState.java  | 12 
 3 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e1821/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
index 40b73a2..0e1d288 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerKeys.java
@@ -26,6 +26,7 @@ public interface DockerKeys {
   String DOCKER_START_COMMAND = DOCKER_PREFIX + "startCommand";
 
   String DEFAULT_DOCKER_NETWORK = "bridge";
+  Boolean DEFAULT_DOCKER_USE_PRIVILEGED = false;
 
   String OUT_FILE = "stdout.txt";
   String ERR_FILE = "stderr.txt";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e1821/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
index af36620..cc319ee 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/docker/DockerProviderService.java
@@ -131,7 +131,7 @@ public class DockerProviderService extends 
AbstractProviderService implements
 launcher.setDockerNetwork(appConf.getComponentOpt(roleGroup, 
DOCKER_NETWORK,
 DEFAULT_DOCKER_NETWORK));
 launcher.setRunPrivilegedContainer(appConf.getComponentOptBool(roleGroup,
-DOCKER_USE_PRIVILEGED, false));
+DOCKER_USE_PRIVILEGED, DEFAULT_DOCKER_USE_PRIVILEGED));
 
 // Set the environment
+Map<String, String> standardTokens = providerUtils.getStandardTokenMap(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/834e1821/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
index 49e7b78..6db375d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -91,6 +91,9 @@ 

[06/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/RegistrationResponse.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/RegistrationResponse.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/RegistrationResponse.java
new file mode 100644
index 000..80b7a5e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/RegistrationResponse.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.agent;
+
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.util.List;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class RegistrationResponse {
+
+  @JsonProperty("response")
+  private RegistrationStatus response;
+
+  /**
+   * exitstatus is an error code raised on the server side. exitstatus
+   * = 0 (OK - Default), exitstatus = 1 (registration failed because of
+   * different versions of agent and server)
+   */
+  @JsonProperty("exitstatus")
+  private int exitstatus;
+
+  /** log - message, which will be printed to agents log */
+  @JsonProperty("log")
+  private String log;
+
+  /** tags - tags associated with the container */
+  @JsonProperty("tags")
+  private String tags;
+  
+  @JsonProperty("package")
+  private String pkg;
+
+  //Response id to start with, usually zero.
+  @JsonProperty("responseId")
+  private long responseId;
+
+  @JsonProperty("statusCommands")
+  private List<StatusCommand> statusCommands = null;
+
+  public RegistrationResponse() {
+  }
+
+  public RegistrationStatus getResponse() {
+return response;
+  }
+
+  public void setResponse(RegistrationStatus response) {
+this.response = response;
+  }
+
+  public int getExitstatus() {
+return exitstatus;
+  }
+
+  public void setExitstatus(int exitstatus) {
+this.exitstatus = exitstatus;
+  }
+
+  public RegistrationStatus getResponseStatus() {
+return response;
+  }
+
+  public void setResponseStatus(RegistrationStatus response) {
+this.response = response;
+  }
+
+  public List<StatusCommand> getStatusCommands() {
+return statusCommands;
+  }
+
+  public void setStatusCommands(List<StatusCommand> statusCommands) {
+this.statusCommands = statusCommands;
+  }
+
+  public long getResponseId() {
+return responseId;
+  }
+
+  public void setResponseId(long responseId) {
+this.responseId = responseId;
+  }
+
+  public String getTags() {
+return tags;
+  }
+
+  public void setTags(String tags) {
+this.tags = tags;
+  }
+
+  public String getLog() {
+return log;
+  }
+
+  public void setLog(String log) {
+this.log = log;
+  }
+
+  public String getPkg() {
+return pkg;
+  }
+
+  public void setPkg(String pkg) {
+this.pkg = pkg;
+  }
+
+  @Override
+  public String toString() {
+return "RegistrationResponse{" +
+   "response=" + response +
+   ", responseId=" + responseId +
+   ", statusCommands=" + statusCommands +
+   '}';
+  }
+}

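Because the bean above is annotated for codehaus Jackson with non-null inclusion, serializing a populated response is a one-liner; the values in the sketch are invented.

import org.apache.slider.server.appmaster.web.rest.agent.RegistrationResponse;
import org.codehaus.jackson.map.ObjectMapper;

public class RegistrationResponseSample {
  public static void main(String[] args) throws Exception {
    RegistrationResponse response = new RegistrationResponse();
    response.setResponseId(0);   // first heartbeat is expected to carry id 0
    response.setExitstatus(0);   // 0 means the registration was accepted
    response.setLog("registered");
    // NON_NULL inclusion: unset fields such as tags, package and statusCommands are omitted.
    System.out.println(new ObjectMapper().writeValueAsString(response));
  }
}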
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/RegistrationStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/RegistrationStatus.java
 

[57/66] [abbrv] hadoop git commit: YARN-5675. Swagger definition for YARN service API. Contributed by Gour Saha

2016-11-23 Thread jianhe
YARN-5675. Swagger definition for YARN service API. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a7558f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a7558f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a7558f1

Branch: refs/heads/yarn-native-services
Commit: 9a7558f1734dc41d845aa3d60a3bca9dbdc4ed2e
Parents: e7ba434
Author: Jian He 
Authored: Wed Oct 12 13:27:53 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 ...RN-Simplified-V1-API-Layer-For-Services.yaml | 416 +++
 1 file changed, 416 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a7558f1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
new file mode 100644
index 000..6169fcd
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
@@ -0,0 +1,416 @@
+# Hadoop YARN REST APIs for services v1 spec in YAML
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+swagger: '2.0'
+info:
+  title: "[YARN-4793] Simplified API layer for services and beyond"
+  description: |
+Bringing a new service on YARN today is not a simple experience. The APIs 
of existing frameworks are either too low level (native YARN), require writing 
new code (for frameworks with programmatic APIs) or writing a complex spec (for 
declarative frameworks). In addition to building critical building blocks 
inside YARN (as part of other efforts at 
link:https://issues.apache.org/jira/browse/YARN-4692[YARN-4692]), there is a 
need for simplifying the user facing story for building services. Experience of 
projects like Apache Slider running real-life services like HBase, Storm, 
Accumulo, Solr etc., gives us some very good insights into what simplified APIs 
for services should look like.
+
+
+To this end, we should look at a new simple-services API layer backed by 
REST interfaces. This API can be used to create and manage the lifecycle of 
YARN services. Services here can range from simple single-component apps to 
complex multi-component assemblies needing orchestration.
+
+
+We should also look at making this a unified REST based entry point for 
other important features like resource-profile management 
(link:https://issues.apache.org/jira/browse/YARN-3926[YARN-3926]), 
package-definitions' lifecycle-management and service-discovery 
(link:https://issues.apache.org/jira/browse/YARN-913[YARN-913]/link:https://issues.apache.org/jira/browse/YARN-4757[YARN-4757]).
 We also need to flesh out its relation to our present much lower level REST 
APIs (link:https://issues.apache.org/jira/browse/YARN-1695[YARN-1695]) in YARN 
for application-submission and management.
+
+
+This document focuses on this specification. In most cases, the 
application owner will not be forced to make any changes to their application. 
This is primarily true if the application is packaged with containerization 
technologies like docker. Irrespective of how complex the application is, there 
will be hooks provided at appropriate layers to allow pluggable and 
customizable application behavior.
+
+  version: "1.0.0"
+  license:
+name: Apache 2.0
+url: http://www.apache.org/licenses/LICENSE-2.0.html
+# the domain of the service
+host: host.mycompany.com
+# array of all schemes that your API supports
+schemes:
+  - http
+  - https
+# will be prefixed to all paths

[39/66] [abbrv] hadoop git commit: Rename org.apache.slider.core.build to org.apache.slider.core.buildutils

2016-11-23 Thread jianhe
Rename org.apache.slider.core.build to org.apache.slider.core.buildutils


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a74c2eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a74c2eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a74c2eb

Branch: refs/heads/yarn-native-services
Commit: 3a74c2ebe5c9617ef6bb1a26aaec05bd1a4bf42b
Parents: 09ee280
Author: Jian He 
Authored: Wed Aug 10 00:39:58 2016 +0800
Committer: Jian He 
Committed: Wed Nov 23 15:25:49 2016 -0800

--
 .../org/apache/slider/client/SliderClient.java  |   5 +-
 .../slider/core/buildutils/BuildHelper.java |  48 +++
 .../slider/core/buildutils/InstanceBuilder.java | 312 +++
 .../slider/core/buildutils/InstanceIO.java  |  83 +
 .../server/appmaster/SliderAppMaster.java   |   5 +-
 5 files changed, 446 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a74c2eb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 8210f4d..d464ce0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -115,8 +115,8 @@ import org.apache.slider.common.tools.Duration;
 import org.apache.slider.common.tools.SliderFileSystem;
 import org.apache.slider.common.tools.SliderUtils;
 import org.apache.slider.common.tools.SliderVersionInfo;
-import org.apache.slider.core.build.InstanceBuilder;
-import org.apache.slider.core.build.InstanceIO;
+import org.apache.slider.core.buildutils.InstanceBuilder;
+import org.apache.slider.core.buildutils.InstanceIO;
 import org.apache.slider.core.conf.AggregateConf;
 import org.apache.slider.core.conf.ConfTree;
 import org.apache.slider.core.conf.ConfTreeOperations;
@@ -151,7 +151,6 @@ import org.apache.slider.core.registry.YarnAppListClient;
 import org.apache.slider.core.registry.docstore.ConfigFormat;
 import org.apache.slider.core.registry.docstore.PublishedConfigSet;
 import org.apache.slider.core.registry.docstore.PublishedConfiguration;
-import org.apache.slider.core.registry.docstore.PublishedConfigurationOutputter;
 import org.apache.slider.core.registry.docstore.PublishedExports;
 import org.apache.slider.core.registry.docstore.PublishedExportsOutputter;
 import org.apache.slider.core.registry.docstore.PublishedExportsSet;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a74c2eb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/BuildHelper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/BuildHelper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/BuildHelper.java
new file mode 100644
index 000..80f165f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/buildutils/BuildHelper.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package 

[53/66] [abbrv] hadoop git commit: YARN-5689. Update native services REST API to use agentless docker provider. Contributed by Billie Rinaldi & Gour Saha

2016-11-23 Thread jianhe
YARN-5689. Update native services REST API to use agentless docker provider. 
Contributed by Billie Rinaldi & Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f273d930
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f273d930
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f273d930

Branch: refs/heads/yarn-native-services
Commit: f273d930a429fa7fcbc6350e0fb6d919333bd347
Parents: ebbf886
Author: Jian He 
Authored: Thu Oct 13 11:34:58 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../api/impl/ApplicationApiService.java | 251 ++-
 .../yarn/services/utils/RestApiConstants.java   |   3 -
 .../api/impl/TestApplicationApiService.java |   6 +-
 3 files changed, 79 insertions(+), 181 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f273d930/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index 9645696..0a62629 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -50,7 +50,6 @@ import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 
-import org.apache.commons.collections.MapUtils;
 import org.apache.commons.lang.SerializationUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -68,10 +67,12 @@ import 
org.apache.hadoop.yarn.services.resource.Configuration;
 import org.apache.hadoop.yarn.services.resource.Container;
 import org.apache.hadoop.yarn.services.resource.ContainerState;
 import org.apache.hadoop.yarn.services.resource.Resource;
+import org.apache.slider.api.OptionKeys;
 import org.apache.slider.api.ResourceKeys;
 import org.apache.slider.api.StateValues;
 import org.apache.slider.client.SliderClient;
 import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.SliderKeys;
 import org.apache.slider.common.params.ActionCreateArgs;
 import org.apache.slider.common.params.ActionFlexArgs;
 import org.apache.slider.common.params.ActionFreezeArgs;
@@ -88,12 +89,11 @@ import org.apache.slider.core.exceptions.NotFoundException;
 import org.apache.slider.core.exceptions.SliderException;
 import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
 import org.apache.slider.core.registry.docstore.ConfigFormat;
+import org.apache.slider.providers.docker.DockerKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.gson.GsonBuilder;
-import com.google.gson.JsonArray;
 import com.google.gson.JsonElement;
 import com.google.gson.JsonNull;
 import com.google.gson.JsonObject;
@@ -211,7 +211,8 @@ public class ApplicationApiService implements 
ApplicationApi {
   application.setConfiguration(new Configuration());
 }
 addPropertyToConfiguration(application.getConfiguration(),
-PROPERTY_COMPONENT_TYPE, COMPONENT_TYPE_EXTERNAL);
+SliderKeys.COMPONENT_TYPE_KEY,
+SliderKeys.COMPONENT_TYPE_EXTERNAL_APP);
   }
   // resource
   validateApplicationResource(application.getResource(), null, application
@@ -249,7 +250,8 @@ public class ApplicationApiService implements 
ApplicationApi {
 comp.setConfiguration(new Configuration());
   }
   addPropertyToConfiguration(comp.getConfiguration(),
-  PROPERTY_COMPONENT_TYPE, COMPONENT_TYPE_EXTERNAL);
+  SliderKeys.COMPONENT_TYPE_KEY,
+  SliderKeys.COMPONENT_TYPE_EXTERNAL_APP);
   compNameArtifactIdMap.put(comp.getName(), 
comp.getArtifact().getId());
   comp.setName(comp.getArtifact().getId());
 }
@@ -339,9 +341,9 @@ public class ApplicationApiService implements 
ApplicationApi {
 final ActionCreateArgs createArgs = new ActionCreateArgs();
 addAppConfOptions(createArgs, application, compNameArtifactIdMap);
 

[30/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderExitCodes.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderExitCodes.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderExitCodes.java
new file mode 100644
index 000..5758f79
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderExitCodes.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.common;
+
+import org.apache.slider.core.main.LauncherExitCodes;
+
+public interface SliderExitCodes extends LauncherExitCodes {
+
+  /**
+   * starting point for exit codes; not an exception itself
+   */
+  int _EXIT_CODE_BASE =   64;
+
+  /**
+   * service entered the failed state: {@value}
+   */
+  int EXIT_YARN_SERVICE_FAILED =  65;
+
+  /**
+   * service was killed: {@value}
+   */
+  int EXIT_YARN_SERVICE_KILLED =  66;
+
+  /**
+   * timeout on monitoring client: {@value}
+   */
+  int EXIT_TIMED_OUT =67;
+
+  /**
+   * service finished with an error: {@value}
+   */
+  int EXIT_YARN_SERVICE_FINISHED_WITH_ERROR = 68;
+
+  /**
+   * the application instance is unknown: {@value}
+   */
+  int EXIT_UNKNOWN_INSTANCE = 69;
+
+  /**
+   * the application instance is in the wrong state for that operation: 
{@value}
+   */
+  int EXIT_BAD_STATE =70;
+
+  /**
+   * A spawned master process failed 
+   */
+  int EXIT_PROCESS_FAILED =   71;
+
+  /**
+   * The instance failed -too many containers were
+   * failing or some other threshold was reached
+   */
+  int EXIT_DEPLOYMENT_FAILED =72;
+
+  /**
+   * The application is live -and the requested operation
+   * does not work if the cluster is running
+   */
+  int EXIT_APPLICATION_IN_USE =   73;
+
+  /**
+   * There already is an application instance of that name
+   * when an attempt is made to create a new instance
+   */
+  int EXIT_INSTANCE_EXISTS =  75;
+
+  /**
+   * Exit code when the configuration is invalid or incomplete: {@value}
+   */
+  int EXIT_BAD_CONFIGURATION =77;
+
+}
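
For illustration, a hypothetical helper that maps a few of these exit codes to
human-readable messages (describe() is not part of Slider; it only uses the
constants defined above):

import org.apache.slider.common.SliderExitCodes;

public class ExitCodeDescriber {
  static String describe(int exitCode) {
    switch (exitCode) {
      case SliderExitCodes.EXIT_INSTANCE_EXISTS:
        return "an application instance with that name already exists";
      case SliderExitCodes.EXIT_UNKNOWN_INSTANCE:
        return "the application instance is unknown";
      case SliderExitCodes.EXIT_BAD_CONFIGURATION:
        return "the configuration is invalid or incomplete";
      default:
        return "exit code " + exitCode;
    }
  }

  public static void main(String[] args) {
    System.out.println(describe(SliderExitCodes.EXIT_INSTANCE_EXISTS));
  }
}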

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
new file mode 100644
index 000..ba3effc
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/SliderKeys.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  

[11/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
new file mode 100644
index 000..16c2435
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/state/AppState.java
@@ -0,0 +1,2489 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.state;
+
+import com.codahale.metrics.Metric;
+import com.codahale.metrics.MetricRegistry;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ClusterDescriptionKeys;
+import org.apache.slider.api.ClusterDescriptionOperations;
+import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.InternalKeys;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.StatusKeys;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.api.types.ComponentInformation;
+import org.apache.slider.api.types.RoleStatistics;
+import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.tools.ConfigHelper;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.NoSuchNodeException;
+import org.apache.slider.core.exceptions.SliderInternalStateException;
+import org.apache.slider.core.exceptions.TriggerClusterTeardownException;
+import org.apache.slider.core.persist.AggregateConfSerDeser;
+import org.apache.slider.core.persist.ConfTreeSerDeser;
+import org.apache.slider.providers.PlacementPolicy;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.server.appmaster.management.LongGauge;
+import org.apache.slider.server.appmaster.management.MetricsAndMonitoring;
+import org.apache.slider.server.appmaster.management.MetricsConstants;
+import org.apache.slider.server.appmaster.operations.AbstractRMOperation;
+import org.apache.slider.server.appmaster.operations.ContainerReleaseOperation;
+import org.apache.slider.server.appmaster.operations.ContainerRequestOperation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import 

[20/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java
new file mode 100644
index 000..9767430
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderCore.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.exceptions.SliderException;
+
+import java.util.List;
+public interface ProviderCore {
+
+  String getName();
+
+  List getRoles();
+
+  Configuration getConf();
+
+  /**
+   * Verify that an instance definition is considered valid by the provider
+   * @param instanceDefinition instance definition
+   * @throws SliderException if the configuration is not valid
+   */
+  void validateInstanceDefinition(AggregateConf instanceDefinition) throws
+  SliderException;
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
new file mode 100644
index 000..761ac0f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderRole.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers;
+
+import org.apache.slider.api.ResourceKeys;
+
+/**
+ * Provider role and key for use in app requests.
+ * 
+ * This class uses the role name as the key for hashes and in equality tests,
+ * and ignores the other values.
+ */
+public final class ProviderRole {
+  public final String name;
+  public final String group;
+  public final int id;
+  public int placementPolicy;
+  public int nodeFailureThreshold;
+  public final long placementTimeoutSeconds;
+  public final String labelExpression;
+
+  public ProviderRole(String name, int id) {
+this(name,
+name,
+id,
+PlacementPolicy.DEFAULT,
+ResourceKeys.DEFAULT_NODE_FAILURE_THRESHOLD,
+ResourceKeys.DEFAULT_PLACEMENT_ESCALATE_DELAY_SECONDS,
+ResourceKeys.DEF_YARN_LABEL_EXPRESSION);
+  }
+
+  /**
+   * Create a provider role
+   * @param name role/component name
+   
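
As the class javadoc above states, equality and hashing are keyed on the role
name only. A small hypothetical check of that contract (assuming equals/hashCode
follow the documented behaviour; the demo class is not part of the patch):

import org.apache.slider.providers.ProviderRole;

public class ProviderRoleEqualityCheck {
  public static void main(String[] args) {
    ProviderRole a = new ProviderRole("worker", 1);
    ProviderRole b = new ProviderRole("worker", 2);
    // Same name, different ids: equal and same hash per the documented contract.
    System.out.println(a.equals(b));
    System.out.println(a.hashCode() == b.hashCode());
  }
}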

[64/66] [abbrv] hadoop git commit: YARN-5778. Add .keep file for yarn native services AM web app. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
YARN-5778. Add .keep file for yarn native services AM web app. Contributed by 
Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dff8965
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dff8965
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dff8965

Branch: refs/heads/yarn-native-services
Commit: 3dff8965cddd245a51750ba71e264b6b8e1b6fdc
Parents: ebd6392
Author: Gour Saha 
Authored: Tue Oct 25 10:06:16 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../src/main/resources/webapps/slideram/.keep| 0
 1 file changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dff8965/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/webapps/slideram/.keep
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/webapps/slideram/.keep
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/webapps/slideram/.keep
new file mode 100644
index 000..e69de29


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[58/66] [abbrv] hadoop git commit: YARN-5796. Convert enums values in service code to upper case and special handling of an error. Contributed by Gour Saha

2016-11-23 Thread jianhe
YARN-5796. Convert enums values in service code to upper case and special 
handling of an error. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51318038
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51318038
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51318038

Branch: refs/heads/yarn-native-services
Commit: 513180384b49e3332f22d202b2b44c38740127ea
Parents: eda6e8c
Author: Jian He 
Authored: Tue Nov 1 11:00:11 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../yarn/services/api/impl/ApplicationApiService.java |  4 
 .../apache/hadoop/yarn/services/resource/Artifact.java|  2 +-
 .../apache/hadoop/yarn/services/resource/ConfigFile.java  |  4 ++--
 .../hadoop/yarn/services/resource/ReadinessCheck.java |  2 +-
 .../hadoop/yarn/services/utils/RestApiErrorMessages.java  | 10 +++---
 5 files changed, 15 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51318038/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
index cf43ac2..37bd134 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/api/impl/ApplicationApiService.java
@@ -172,6 +172,10 @@ public class ApplicationApiService implements 
ApplicationApi {
 applicationStatus.setDiagnostics(ERROR_APPLICATION_IN_USE);
 return Response.status(Status.BAD_REQUEST).entity(applicationStatus)
 .build();
+  } else if (se.getExitCode() == SliderExitCodes.EXIT_INSTANCE_EXISTS) {
+applicationStatus.setDiagnostics(ERROR_APPLICATION_INSTANCE_EXISTS);
+return Response.status(Status.BAD_REQUEST).entity(applicationStatus)
+.build();
   } else {
 applicationStatus.setDiagnostics(se.getMessage());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51318038/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java
index 9ac2bc7..87fcf89 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Artifact.java
@@ -40,7 +40,7 @@ public class Artifact implements Serializable {
   private String id = null;
 
   public enum TypeEnum {
-DOCKER("docker"), TARBALL("tarball"), APPLICATION("application");
+DOCKER("DOCKER"), TARBALL("TARBALL"), APPLICATION("APPLICATION");
 
 private String value;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51318038/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ConfigFile.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ConfigFile.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ConfigFile.java
index 3ced153..01d976f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/ConfigFile.java
+++ 

[24/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
new file mode 100644
index 000..5a3eb3d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/launch/AbstractLauncher.java
@@ -0,0 +1,528 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.core.launch;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LogAggregationContext;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.RoleKeys;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.tools.CoreFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.MapOperations;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.InvocationTargetException;
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * Launcher of applications: base class
+ */
+public abstract class AbstractLauncher extends Configured {
+  private static final Logger log =
+LoggerFactory.getLogger(AbstractLauncher.class);
+  public static final String CLASSPATH = "CLASSPATH";
+  /**
+   * Filesystem to use for the launch
+   */
+  protected final CoreFileSystem coreFileSystem;
+  /**
+   * Env vars; set up at final launch stage
+   */
+  protected final Map envVars = new HashMap<>();
+  protected final MapOperations env = new MapOperations("env", envVars);
+  protected final ContainerLaunchContext containerLaunchContext =
+Records.newRecord(ContainerLaunchContext.class);
+  protected final List commands = new ArrayList<>(20);
+  protected final Map localResources = new HashMap<>();
+  protected final Map mountPaths = new HashMap<>();
+  private final Map serviceData = new HashMap<>();
+  // security
+  protected final Credentials credentials;
+  protected LogAggregationContext logAggregationContext;
+  protected boolean yarnDockerMode = false;
+  protected String dockerImage;
+  protected String yarnContainerMountPoints;
+  protected String runPrivilegedContainer;
+
+
+  /**
+   * Create instance.
+   * @param conf configuration
+   * @param coreFileSystem filesystem
+   * @param credentials initial set of credentials -null is permitted
+   */
+  protected AbstractLauncher(Configuration conf,
+  CoreFileSystem coreFileSystem,
+  Credentials credentials) {
+super(conf);
+this.coreFileSystem = coreFileSystem;
+this.credentials = credentials != null ? credentials: new Credentials();
+  }
+
+  /**
+   * Get the container. Until "completed", this isn't valid to launch.
+   * @return the container to launch
+   */
+  public ContainerLaunchContext getContainerLaunchContext() {
+return 

[59/66] [abbrv] hadoop git commit: YARN-5828. Native services client errors out when config formats are uppercase. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
YARN-5828. Native services client errors out when config formats are uppercase. 
Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfbb0751
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfbb0751
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfbb0751

Branch: refs/heads/yarn-native-services
Commit: dfbb0751a153c34b3520a2623e9320af37e3ee19
Parents: 66b3c2a
Author: Gour Saha 
Authored: Thu Nov 3 18:15:44 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../org/apache/slider/core/registry/docstore/ConfigFormat.java   | 4 +++-
 .../src/main/java/org/apache/slider/providers/ProviderUtils.java | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfbb0751/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
index ddab606..723b975 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/registry/docstore/ConfigFormat.java
@@ -18,6 +18,8 @@
 
 package org.apache.slider.core.registry.docstore;
 
+import java.util.Locale;
+
 public enum ConfigFormat {
 
   JSON("json"),
@@ -51,7 +53,7 @@ public enum ConfigFormat {
*/
   public static ConfigFormat resolve(String type) {
 for (ConfigFormat format: values()) {
-  if (format.getSuffix().equals(type)) {
+  if (format.getSuffix().equals(type.toLowerCase(Locale.ENGLISH))) {
 return format;
   }
 }
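
A small sketch of the behaviour after this change (the demo class is
hypothetical; it assumes only the resolve() method shown above, and that an
unknown type still falls through to null, which the ProviderUtils hunk below
turns into a BadConfigException):

import org.apache.slider.core.registry.docstore.ConfigFormat;

public class ConfigFormatResolveDemo {
  public static void main(String[] args) {
    // Upper-case config file types now resolve the same way as lower-case ones.
    System.out.println(ConfigFormat.resolve("JSON") == ConfigFormat.JSON);  // true after this patch
    System.out.println(ConfigFormat.resolve("json") == ConfigFormat.JSON);  // true before and after
  }
}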

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfbb0751/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
index c5e6782..39986c1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/ProviderUtils.java
@@ -611,7 +611,7 @@ public class ProviderUtils implements RoleKeys, SliderKeys {
   }
   ConfigFormat configFormat = ConfigFormat.resolve(configFileType);
   if (configFormat == null) {
-throw new BadConfigException("Config format " + configFormat +
+throw new BadConfigException("Config format " + configFileType +
 " doesn't exist");
   }
   localizeConfigFile(launcher, roleName, roleGroup, configEntry.getKey(),





[61/66] [abbrv] hadoop git commit: YARN-5610. Initial code for native services REST API. Contributed by Gour Saha

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7ba4349/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Resource.java
new file mode 100644
index 000..a3780cc
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/services/resource/Resource.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.services.resource;
+
+import io.swagger.annotations.ApiModel;
+import io.swagger.annotations.ApiModelProperty;
+
+import java.util.Objects;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * Resource determines the amount of resources (vcores, memory, network, etc.)
+ * usable by a container. This field determines the resource to be applied for
+ * all the containers of a component or application. The resource specified at
+ * the app (or global) level can be overridden at the component level. Only one
+ * of profile OR cpu & memory is expected. It raises a validation
+ * exception otherwise.
+ **/
+
+@ApiModel(description = "Resource determines the amount of resources (vcores, 
memory, network, etc.) usable by a container. This field determines the 
resource to be applied for all the containers of a component or application. 
The resource specified at the app (or global) level can be overridden at the 
component level. Only one of profile OR cpu & memory is expected. It raises a 
validation exception otherwise.")
+@javax.annotation.Generated(value = "class 
io.swagger.codegen.languages.JavaClientCodegen", date = 
"2016-06-02T08:15:05.615-07:00")
+public class Resource extends BaseResource {
+  private static final long serialVersionUID = -6431667797380250037L;
+
+  private String profile = null;
+  private Integer cpus = null;
+  private String memory = null;
+
+  /**
+   * Each resource profile has a unique id which is associated with a
+   * cluster-level predefined memory, cpus, etc.
+   **/
+  public Resource profile(String profile) {
+this.profile = profile;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Each resource profile has a 
unique id which is associated with a cluster-level predefined memory, cpus, 
etc.")
+  @JsonProperty("profile")
+  public String getProfile() {
+return profile;
+  }
+
+  public void setProfile(String profile) {
+this.profile = profile;
+  }
+
+  /**
+   * Amount of vcores allocated to each container (optional but overrides cpus
+   * in profile if specified).
+   **/
+  public Resource cpus(Integer cpus) {
+this.cpus = cpus;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Amount of vcores allocated to 
each container (optional but overrides cpus in profile if specified).")
+  @JsonProperty("cpus")
+  public Integer getCpus() {
+return cpus;
+  }
+
+  public void setCpus(Integer cpus) {
+this.cpus = cpus;
+  }
+
+  /**
+   * Amount of memory allocated to each container (optional but overrides 
memory
+   * in profile if specified). Currently accepts only an integer value and
+   * default unit is in MB.
+   **/
+  public Resource memory(String memory) {
+this.memory = memory;
+return this;
+  }
+
+  @ApiModelProperty(example = "null", value = "Amount of memory allocated to 
each container (optional but overrides memory in profile if specified). 
Currently accepts only an integer value and default unit is in MB.")
+  @JsonProperty("memory")
+  public String getMemory() {
+return memory;
+  }
+
+  public void setMemory(String memory) {
+this.memory = memory;
+  }
+
+  @Override
+  public boolean equals(java.lang.Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != 
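
A minimal usage sketch of the fluent setters defined above (values are
illustrative; per the javadoc, either a profile or explicit cpus/memory should
be given, not both; ResourceSpecDemo is not part of the patch):

import org.apache.hadoop.yarn.services.resource.Resource;

public class ResourceSpecDemo {
  public static void main(String[] args) {
    Resource byProfile = new Resource().profile("small");
    Resource explicit = new Resource().cpus(2).memory("2048");
    System.out.println(byProfile.getProfile());
    System.out.println(explicit.getCpus() + " vcores, " + explicit.getMemory() + " MB");
  }
}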

[34/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java
new file mode 100644
index 000..081b7fa
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java
@@ -0,0 +1,2293 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: SliderClusterProtocol.proto
+
+package org.apache.slider.api.proto;
+
+public final class SliderClusterAPI {
+  private SliderClusterAPI() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  /**
+   * Protobuf service {@code org.apache.slider.api.SliderClusterProtocolPB}
+   *
+   * 
+   **
+   * Protocol used between Slider Client and AM
+   * 
+   */
+  public static abstract class SliderClusterProtocolPB
+  implements com.google.protobuf.Service {
+protected SliderClusterProtocolPB() {}
+
+public interface Interface {
+  /**
+   * rpc stopCluster(.org.apache.slider.api.StopClusterRequestProto) 
returns (.org.apache.slider.api.StopClusterResponseProto);
+   */
+  public abstract void stopCluster(
+  com.google.protobuf.RpcController controller,
+  org.apache.slider.api.proto.Messages.StopClusterRequestProto request,
+  
com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc 
upgradeContainers(.org.apache.slider.api.UpgradeContainersRequestProto) returns 
(.org.apache.slider.api.UpgradeContainersResponseProto);
+   *
+   * 
+   **
+   * Upgrade containers 
+   * 
+   */
+  public abstract void upgradeContainers(
+  com.google.protobuf.RpcController controller,
+  org.apache.slider.api.proto.Messages.UpgradeContainersRequestProto 
request,
+  
com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc flexCluster(.org.apache.slider.api.FlexClusterRequestProto) 
returns (.org.apache.slider.api.FlexClusterResponseProto);
+   *
+   * 
+   **
+   * Flex the cluster. 
+   * 
+   */
+  public abstract void flexCluster(
+  com.google.protobuf.RpcController controller,
+  org.apache.slider.api.proto.Messages.FlexClusterRequestProto request,
+  
com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc 
getJSONClusterStatus(.org.apache.slider.api.GetJSONClusterStatusRequestProto) 
returns (.org.apache.slider.api.GetJSONClusterStatusResponseProto);
+   *
+   * 
+   **
+   * Get the current cluster status
+   * 
+   */
+  public abstract void getJSONClusterStatus(
+  com.google.protobuf.RpcController controller,
+  
org.apache.slider.api.proto.Messages.GetJSONClusterStatusRequestProto request,
+  
com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc 
getInstanceDefinition(.org.apache.slider.api.GetInstanceDefinitionRequestProto) 
returns (.org.apache.slider.api.GetInstanceDefinitionResponseProto);
+   *
+   * 
+   **
+   * Get the instance definition
+   * 
+   */
+  public abstract void getInstanceDefinition(
+  com.google.protobuf.RpcController controller,
+  
org.apache.slider.api.proto.Messages.GetInstanceDefinitionRequestProto request,
+  
com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc 
listNodeUUIDsByRole(.org.apache.slider.api.ListNodeUUIDsByRoleRequestProto) 
returns (.org.apache.slider.api.ListNodeUUIDsByRoleResponseProto);
+   *
+   * 
+   **
+   * List all running nodes in a role
+   * 
+   */
+  public abstract void listNodeUUIDsByRole(
+  com.google.protobuf.RpcController controller,
+  org.apache.slider.api.proto.Messages.ListNodeUUIDsByRoleRequestProto 
request,
+  
com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc getNode(.org.apache.slider.api.GetNodeRequestProto) returns 
(.org.apache.slider.api.GetNodeResponseProto);
+   *
+   * 
+   **
+   * Get the details on a node
+   * 
+   */
+  public abstract void getNode(
+  com.google.protobuf.RpcController controller,
+  org.apache.slider.api.proto.Messages.GetNodeRequestProto request,
+  
com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc 

[02/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcess.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcess.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcess.java
new file mode 100644
index 000..9e9e7ac
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/LongLivedProcess.java
@@ -0,0 +1,598 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.services.workflow;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Execute a long-lived process.
+ *
+ * 
+ * Hadoop's {@link org.apache.hadoop.util.Shell} class assumes it is executing
+ * a short lived application; this class allows for the process to run for the
+ * life of the Java process that forked it.
+ * It is designed to be embedded inside a YARN service, though this is not
+ * the sole way that it can be used
+ * 
+ * Key Features:
+ * 
+ *   Output is streamed to the output logger provided.
+ *   the input stream is closed as soon as the process starts.
+ *   The most recent lines of output are saved to a linked list.
+ *   A synchronous callback, {@link LongLivedProcessLifecycleEvent},
+ *   is raised on the start and finish of a process.
+ * 
+ * 
+ */
+public class LongLivedProcess implements Runnable {
+  /**
+   * Limit on number of lines to retain in the "recent" line list:{@value}
+   */
+  public static final int RECENT_LINE_LOG_LIMIT = 64;
+
+  /**
+   * Const defining the time in millis between polling for new text.
+   */
+  private static final int STREAM_READER_SLEEP_TIME = 200;
+  
+  /**
+   * limit on the length of a stream before it triggers an automatic newline.
+   */
+  private static final int LINE_LENGTH = 256;
+  private final ProcessBuilder processBuilder;
+  private Process process;
+  private Integer exitCode = null;
+  private final String name;
+  private final ExecutorService processExecutor;
+  private final ExecutorService logExecutor;
+  
+  private ProcessStreamReader processStreamReader;
+  //list of recent lines, recorded for extraction into reports
+  private final List recentLines = new LinkedList<>();
+  private int recentLineLimit = RECENT_LINE_LOG_LIMIT;
+  private LongLivedProcessLifecycleEvent lifecycleCallback;
+  private final AtomicBoolean finalOutputProcessed = new AtomicBoolean(false);
+
+  /**
+   * Log supplied in the constructor for the spawned process -accessible
+   * to inner classes
+   */
+  private Logger processLog;
+  
+  /**
+   * Class log -accessible to inner classes
+   */
+  private static final Logger LOG = 
LoggerFactory.getLogger(LongLivedProcess.class);
+
+  /**
+   *  flag to indicate that the process is done
+   */
+  private final AtomicBoolean finished = new AtomicBoolean(false);
+
+  /**
+   * Create an instance
+   * @param name process name
+   * @param processLog log for output (or null)
+   * @param commands command list
+   */
+  public LongLivedProcess(String name,
+  Logger processLog,
+  List<String> commands) {
+Preconditions.checkArgument(commands != 

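The javadoc above describes a runner for processes that must outlive Hadoop's Shell helper; a minimal usage sketch follows. The constructor signature (process name, output log, command list) comes from the excerpt, while running it on a plain thread and the "sleep 60" command are illustrative assumptions, not part of the commit.

import org.apache.slider.server.services.workflow.LongLivedProcess;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.List;

public class LongLivedProcessSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LongLivedProcessSketch.class);

  public static void main(String[] args) {
    // Command for the forked process; "sleep 60" stands in for a real daemon.
    List<String> commands = Arrays.asList("sleep", "60");
    // Constructor as shown in the excerpt: process name, output log, command list.
    LongLivedProcess process = new LongLivedProcess("sleeper", LOG, commands);
    // The class implements Runnable, so it can be driven from a thread here;
    // the full class also manages its own executors (not shown in the excerpt).
    Thread runner = new Thread(process, "sleeper-runner");
    runner.setDaemon(true);
    runner.start();
  }
}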
[37/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
new file mode 100644
index 000..f8e5e7c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/ClusterDescription.java
@@ -0,0 +1,795 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.api;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.SliderProviderFactory;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_HOME;
+import static org.apache.slider.api.OptionKeys.INTERNAL_APPLICATION_IMAGE_PATH;
+import static org.apache.slider.api.OptionKeys.ZOOKEEPER_PATH;
+import static org.apache.slider.api.OptionKeys.ZOOKEEPER_QUORUM;
+
+/**
+ * Represents a cluster specification; designed to be sendable over the wire
+ * and persisted in JSON by way of Jackson.
+ * 
+ * When used in cluster status operations the info
+ * and statistics maps contain information about the cluster.
+ * 
+ * As a wire format it is less efficient in both xfer and ser/deser than 
+ * a binary format, but by having one unified format for wire and persistence,
+ * the code paths are simplified.
+ *
+ * This was the original single-file specification/model used in the Hoya
+ * precursor to Slider. It's now retained primarily as a way to publish
+ * the current state of the application, or at least a fraction thereof ...
+ * the larger set of information from the REST API is beyond the scope of
+ * this structure.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+
+public class ClusterDescription implements Cloneable {
+  protected static final Logger
+log = LoggerFactory.getLogger(ClusterDescription.class);
+
+  private static final String UTF_8 = "UTF-8";
+
+  /**
+   * version counter
+   */
+  public String version = "1.0";
+
+  /**
+   * Name of the cluster
+   */
+  public String name;
+
+  /**
+   * Type of cluster
+   */
+  public String type = SliderProviderFactory.DEFAULT_CLUSTER_TYPE;
+
+  /**
+   * State of the cluster
+   */
+  public int state;
+  
+  /*
+   State list for both clusters and nodes in them. Ordered so that destroyed 
follows
+   stopped.
+   
+   Some of the states are only used for recording
+   the persistent state of the cluster and are not
+   seen in node descriptions
+   */
+
+  /**
+   * Specification is incomplete & 

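Because ClusterDescription is a plain Jackson-annotated bean, it can be round-tripped through JSON with the codehaus ObjectMapper it already imports. A minimal sketch, assuming only the public fields visible in the excerpt (the full class also appears to carry its own save/load helpers, which are not shown here):

import org.apache.slider.api.ClusterDescription;
import org.codehaus.jackson.map.ObjectMapper;

public class ClusterDescriptionJsonSketch {
  public static void main(String[] args) throws Exception {
    ClusterDescription cd = new ClusterDescription();
    cd.name = "test-cluster";

    ObjectMapper mapper = new ObjectMapper();
    // Null fields are omitted thanks to @JsonSerialize(include = NON_NULL).
    String json = mapper.writeValueAsString(cd);

    // Unknown properties are tolerated thanks to
    // @JsonIgnoreProperties(ignoreUnknown = true).
    ClusterDescription parsed = mapper.readValue(json, ClusterDescription.class);
    System.out.println(parsed.name + " (version " + parsed.version + ")");
  }
}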
[35/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/RestTypeMarshalling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/RestTypeMarshalling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/RestTypeMarshalling.java
new file mode 100644
index 000..17fd965
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/proto/RestTypeMarshalling.java
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.api.proto;
+
+import com.google.protobuf.ByteString;
+import org.apache.commons.io.IOUtils;
+import org.apache.slider.api.types.ApplicationLivenessInformation;
+import org.apache.slider.api.types.ComponentInformation;
+import org.apache.slider.api.types.ContainerInformation;
+import org.apache.slider.api.types.NodeEntryInformation;
+import org.apache.slider.api.types.NodeInformation;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.persist.AggregateConfSerDeser;
+import org.apache.slider.core.persist.ConfTreeSerDeser;
+import org.apache.slider.server.services.security.SecurityStore;
+
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Class to handle marshalling of REST
+ * types to/from Protobuf records.
+ */
+public class RestTypeMarshalling {
+
+  public static Messages.ApplicationLivenessInformationProto
+  marshall(ApplicationLivenessInformation info) {
+
+Messages.ApplicationLivenessInformationProto.Builder builder =
+Messages.ApplicationLivenessInformationProto.newBuilder();
+builder.setAllRequestsSatisfied(info.allRequestsSatisfied);
+builder.setRequestsOutstanding(info.requestsOutstanding);
+return builder.build();
+  }
+
+  public static ApplicationLivenessInformation
+  unmarshall(Messages.ApplicationLivenessInformationProto wire) {
+ApplicationLivenessInformation info = new ApplicationLivenessInformation();
+info.allRequestsSatisfied = wire.getAllRequestsSatisfied();
+info.requestsOutstanding = wire.getRequestsOutstanding();
+return info;
+  }
+
+  public static ComponentInformation
+  unmarshall(Messages.ComponentInformationProto wire) {
+ComponentInformation info = new ComponentInformation();
+info.name = wire.getName();
+info.priority = wire.getPriority();
+info.placementPolicy = wire.getPlacementPolicy();
+
+info.actual = wire.getActual();
+info.completed = wire.getCompleted();
+info.desired = wire.getDesired();
+info.failed = wire.getFailed();
+info.releasing = wire.getReleasing();
+info.requested = wire.getRequested();
+info.started = wire.getStarted();
+info.startFailed = wire.getStartFailed();
+info.totalRequested = wire.getTotalRequested();
+info.containers = new ArrayList<>(wire.getContainersList());
+if (wire.hasFailureMessage()) {
+  info.failureMessage = wire.getFailureMessage();
+}
+if (wire.hasPendingAntiAffineRequestCount()) {
+  info.pendingAntiAffineRequestCount = 
wire.getPendingAntiAffineRequestCount();
+}
+if (wire.hasIsAARequestOutstanding()) {
+  info.isAARequestOutstanding = wire.getIsAARequestOutstanding();
+}
+return info;
+  }
+
+  public static Messages.GetCertificateStoreResponseProto marshall(
+  SecurityStore securityStore) throws IOException {
+Messages.GetCertificateStoreResponseProto.Builder builder =
+

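The marshall/unmarshall pairs above are symmetric, so a REST-facing type survives a trip through its protobuf record unchanged. A small round-trip sketch using only the liveness methods and public fields shown in the excerpt:

import org.apache.slider.api.proto.Messages;
import org.apache.slider.api.proto.RestTypeMarshalling;
import org.apache.slider.api.types.ApplicationLivenessInformation;

public class LivenessMarshallingSketch {
  public static void main(String[] args) {
    ApplicationLivenessInformation info = new ApplicationLivenessInformation();
    info.allRequestsSatisfied = false;
    info.requestsOutstanding = 3;

    // REST type -> protobuf wire record.
    Messages.ApplicationLivenessInformationProto wire =
        RestTypeMarshalling.marshall(info);

    // Protobuf wire record -> REST type.
    ApplicationLivenessInformation back = RestTypeMarshalling.unmarshall(wire);

    // Both fields should survive the round trip.
    System.out.println(back.requestsOutstanding + " outstanding, satisfied="
        + back.allRequestsSatisfied);
  }
}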
[25/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderVersionInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderVersionInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderVersionInfo.java
new file mode 100644
index 000..86025ee
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderVersionInfo.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import org.apache.hadoop.util.VersionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URL;
+import java.util.Locale;
+import java.util.Properties;
+
+/**
+ * Extract the version properties, which will look something like
+ * 
+ * application.name=${pom.name}
+ * application.version=${pom.version}
+ * application.build=${buildNumber}
+ * application.build.java.version=${java.version}
+ * application.build.info=${pom.name}-${pom.version} Built against 
${buildNumber} on ${java.version} by ${user.name}
+ * 
+ * 
+ * the mvn process-resources target will expand the properties
+ * and add the resources to target/classes, which will then look something like
+ * 
+ *   application.name=Slider Core
+ *   application.version=0.7.1-SNAPSHOT
+ *   application.build=1dd69
+ *   application.build.java.version=1.7.0_45
+ *   application.build.user=stevel
+ *   application.build.info=Slider Core-0.7.1-SNAPSHOT Built against 1dd69 on 
1.7.0_45 by stevel
+ * 
+ * 
+ * Note: the values will change and more properties added.
+ */
+public class SliderVersionInfo {
+  private static final Logger log = 
LoggerFactory.getLogger(SliderVersionInfo.class);
+
+  /**
+   * Name of the resource containing the filled-in-at-runtime props
+   */
+  public static final String VERSION_RESOURCE =
+  "org/apache/slider/providers/dynamic/application.properties";
+
+  public static final String APP_NAME = "application.name";
+  public static final String APP_VERSION = "application.version";
+  public static final String APP_BUILD = "application.build";
+  public static final String APP_BUILD_JAVA_VERSION = 
"application.build.java.version";
+  public static final String APP_BUILD_USER = "application.build.user";
+  public static final String APP_BUILD_INFO = "application.build.info";
+  public static final String HADOOP_BUILD_INFO = "hadoop.build.info";
+  public static final String HADOOP_DEPLOYED_INFO = "hadoop.deployed.info";
+
+
+  public static Properties loadVersionProperties()  {
+Properties props = new Properties();
+URL resURL = SliderVersionInfo.class.getClassLoader()
+   .getResource(VERSION_RESOURCE);
+assert resURL != null : "Null resource " + VERSION_RESOURCE;
+
+try {
+  InputStream inStream = resURL.openStream();
+  assert inStream != null : "Null input stream from " + VERSION_RESOURCE;
+  props.load(inStream);
+} catch (IOException e) {
+  log.warn("IOE loading " + VERSION_RESOURCE, e);
+}
+return props;
+  }
+
+  /**
+   * Load the version info and print it
+   * @param logger logger
+   */
+  public static void loadAndPrintVersionInfo(Logger logger) {
+Properties props = loadVersionProperties();
+logger.info(props.getProperty(APP_BUILD_INFO));
+logger.info("Compiled against Hadoop {}",
+props.getProperty(HADOOP_BUILD_INFO));
+logger.info(getHadoopVersionString());
+  }
+  
+  public static String getHadoopVersionString() {
+return String.format(Locale.ENGLISH,
+"Hadoop runtime version %s with source checksum %s and build date %s",
+VersionInfo.getBranch(),
+

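A short usage sketch for the helper above, reading the property keys documented in the javadoc; it relies only on methods and constants visible in the excerpt:

import org.apache.slider.common.tools.SliderVersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Properties;

public class VersionInfoSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(VersionInfoSketch.class);

  public static void main(String[] args) {
    // Reads the filled-in application.properties resource from the classpath.
    Properties props = SliderVersionInfo.loadVersionProperties();
    LOG.info("Name:    {}", props.getProperty(SliderVersionInfo.APP_NAME));
    LOG.info("Version: {}", props.getProperty(SliderVersionInfo.APP_VERSION));
    LOG.info("Build:   {}", props.getProperty(SliderVersionInfo.APP_BUILD));

    // Or delegate to the convenience method that logs build and Hadoop info.
    SliderVersionInfo.loadAndPrintVersionInfo(LOG);
  }
}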
[16/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
new file mode 100644
index 000..67d3647
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/slideram/SliderAMProviderService.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers.slideram;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.tools.ConfigHelper;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.core.registry.docstore.PublishedConfiguration;
+import org.apache.slider.core.registry.info.CustomRegistryConstants;
+import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.providers.ProviderCore;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.providers.agent.AgentKeys;
+import org.apache.slider.server.appmaster.PublishedArtifacts;
+import org.apache.slider.server.appmaster.web.rest.RestPaths;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.slider.server.appmaster.web.rest.RestPaths.*;
+
+/**
+ * Exists just to move some functionality out of AppMaster into a peer class
+ * of the actual service provider doing the real work
+ */
+public class SliderAMProviderService extends AbstractProviderService implements
+ProviderCore,
+AgentKeys,
+SliderKeys {
+
+  public SliderAMProviderService() {
+super("SliderAMProviderService");
+  }
+
+  @Override
+  public String getHumanName() {
+return "Slider Application";
+  }
+  
+  @Override
+  public Configuration loadProviderConfigurationInformation(File confDir) 
throws
+  BadCommandArgumentsException,
+  IOException {
+return null;
+  }
+
+  @Override
+  public void buildContainerLaunchContext(ContainerLauncher containerLauncher,
+  AggregateConf instanceDefinition,
+  Container container,
+  ProviderRole role,
+  SliderFileSystem sliderFileSystem,
+  Path generatedConfPath,
+  MapOperations resourceComponent,
+  MapOperations appComponent,
+  Path containerTmpDirPath) throws IOException, SliderException {
+  }
+
+  @Override
+  public List<ProviderRole> getRoles() {
+return new ArrayList<>(0);
+  }
+
+  @Override
+  public void validateInstanceDefinition(AggregateConf instanceDefinition) 
throws
+  SliderException {
+
+  }
+
+  @Override
+  public void applyInitialRegistryDefinitions(URL amWebURI,
+  URL agentOpsURI,
+  URL agentStatusURI,
+  ServiceRecord serviceRecord)
+  throws IOException {
+super.applyInitialRegistryDefinitions(amWebURI,
+agentOpsURI,
+agentStatusURI,
+

[26/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
new file mode 100644
index 000..73e0879
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/SliderUtils.java
@@ -0,0 +1,2548 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.compress.archivers.zip.ZipArchiveEntry;
+import org.apache.commons.compress.archivers.zip.ZipArchiveInputStream;
+import org.apache.commons.io.output.ByteArrayOutputStream;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.GlobFilter;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.slider.Slider;
+import org.apache.slider.api.InternalKeys;
+import org.apache.slider.api.RoleKeys;
+import org.apache.slider.api.types.ContainerInformation;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.params.Arguments;
+import org.apache.slider.common.params.SliderActions;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.launch.ClasspathConstructor;
+import org.apache.slider.core.main.LauncherExitCodes;
+import org.apache.slider.providers.agent.AgentKeys;
+import org.apache.slider.server.services.utility.PatternValidator;
+import org.apache.slider.server.services.workflow.ForkedProcessService;
+import org.apache.zookeeper.server.util.KerberosUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintWriter;
+import 

[47/66] [abbrv] hadoop git commit: YARN-5505. Create an agent-less docker provider in the native-services framework. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
YARN-5505. Create an agent-less docker provider in the native-services 
framework. Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fa8152d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fa8152d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fa8152d

Branch: refs/heads/yarn-native-services
Commit: 2fa8152d353c4768bac5c59de4ced3a9f715e163
Parents: 3355401
Author: Jian He 
Authored: Thu Sep 1 22:38:42 2016 +0800
Committer: Jian He 
Committed: Wed Nov 23 15:25:49 2016 -0800

--
 .../java/org/apache/slider/api/OptionKeys.java  |   15 +-
 .../org/apache/slider/client/SliderClient.java  |   17 +-
 .../org/apache/slider/common/SliderKeys.java|   22 +-
 .../apache/slider/common/tools/SliderUtils.java |4 +
 .../slider/core/launch/AbstractLauncher.java|   18 +-
 .../PublishedConfigurationOutputter.java|6 +-
 .../providers/AbstractClientProvider.java   |4 +-
 .../providers/AbstractProviderService.java  |   22 +-
 .../slider/providers/ProviderService.java   |   12 +-
 .../apache/slider/providers/ProviderUtils.java  | 1391 ++
 .../providers/agent/AgentClientProvider.java|   36 +-
 .../slider/providers/agent/AgentKeys.java   |   12 +-
 .../providers/agent/AgentProviderService.java   |  705 ++---
 .../providers/docker/DockerClientProvider.java  |   96 ++
 .../slider/providers/docker/DockerKeys.java |   32 +
 .../providers/docker/DockerProviderFactory.java |   43 +
 .../providers/docker/DockerProviderService.java |  355 +
 .../slideram/SliderAMProviderService.java   |4 -
 .../server/appmaster/SliderAppMaster.java   |   39 +-
 .../main/resources/org/apache/slider/slider.xml |4 +
 .../slider/providers/docker/appConfig.json  |   42 +
 .../slider/providers/docker/resources.json  |   16 +
 .../slider/providers/docker/test.template   |   16 +
 23 files changed, 1971 insertions(+), 940 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fa8152d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java
index a035a99..434b1d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/OptionKeys.java
@@ -41,7 +41,20 @@ public interface OptionKeys extends InternalKeys {
* Prefix for site.xml options: {@value}
*/
   String SITE_XML_PREFIX = "site.";
-
+  /**
+   * Prefix for config file options: {@value}
+   */
+  String CONF_FILE_PREFIX = "conf.";
+  /**
+   * Prefix for package options: {@value}
+   */
+  String PKG_FILE_PREFIX = "pkg.";
+  /**
+   * Prefix for export options: {@value}
+   */
+  String EXPORT_PREFIX = "export.";
+  String TYPE_SUFFIX = ".type";
+  String NAME_SUFFIX = ".name";
 
   /**
* Zookeeper quorum host list: {@value}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fa8152d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index 3129f6f..5096bb7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -151,7 +151,6 @@ import org.apache.slider.core.registry.YarnAppListClient;
 import org.apache.slider.core.registry.docstore.ConfigFormat;
 import org.apache.slider.core.registry.docstore.PublishedConfigSet;
 import org.apache.slider.core.registry.docstore.PublishedConfiguration;

[19/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
new file mode 100644
index 000..4ffae7c
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentProviderService.java
@@ -0,0 +1,3212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers.agent;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.types.Endpoint;
+import org.apache.hadoop.registry.client.types.ProtocolTypes;
+import org.apache.hadoop.registry.client.types.ServiceRecord;
+import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
+import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ClusterNode;
+import org.apache.slider.api.InternalKeys;
+import org.apache.slider.api.OptionKeys;
+import org.apache.slider.api.ResourceKeys;
+import org.apache.slider.api.StatusKeys;
+import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.conf.AggregateConf;
+import org.apache.slider.core.conf.ConfTreeOperations;
+import org.apache.slider.core.conf.MapOperations;
+import org.apache.slider.core.exceptions.BadCommandArgumentsException;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.core.exceptions.NoSuchNodeException;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.launch.CommandLineBuilder;
+import org.apache.slider.core.launch.ContainerLauncher;
+import org.apache.slider.core.registry.docstore.ConfigFormat;
+import org.apache.slider.core.registry.docstore.ConfigUtils;
+import org.apache.slider.core.registry.docstore.ExportEntry;
+import org.apache.slider.core.registry.docstore.PublishedConfiguration;
+import 
org.apache.slider.core.registry.docstore.PublishedConfigurationOutputter;
+import org.apache.slider.core.registry.docstore.PublishedExports;
+import org.apache.slider.core.registry.info.CustomRegistryConstants;
+import org.apache.slider.providers.AbstractProviderService;
+import org.apache.slider.providers.MonitorDetail;
+import org.apache.slider.providers.ProviderCore;
+import org.apache.slider.providers.ProviderRole;
+import org.apache.slider.providers.ProviderUtils;
+import 
org.apache.slider.providers.agent.application.metadata.AbstractComponent;
+import org.apache.slider.providers.agent.application.metadata.Application;
+import 

[41/66] [abbrv] hadoop git commit: YARN-5538. Apply SLIDER-875 to yarn-native-services. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
YARN-5538. Apply SLIDER-875 to yarn-native-services. Contributed by Billie 
Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3355401f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3355401f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3355401f

Branch: refs/heads/yarn-native-services
Commit: 3355401fa3c19bebd9a2a0b7ce4fe3dd81dae6f8
Parents: 59a8975
Author: Jian He 
Authored: Mon Aug 22 13:58:50 2016 +0800
Committer: Jian He 
Committed: Wed Nov 23 15:25:49 2016 -0800

--
 .../java/org/apache/slider/api/RoleKeys.java|   5 +
 .../org/apache/slider/client/SliderClient.java  |  14 +-
 .../org/apache/slider/common/SliderKeys.java|  13 ++
 .../apache/slider/common/tools/SliderUtils.java |  88 +++
 .../slider/core/buildutils/InstanceBuilder.java | 205 +
 .../slider/core/conf/ConfTreeOperations.java|  50 
 .../providers/AbstractProviderService.java  |   3 +-
 .../slider/providers/ProviderService.java   |   4 +-
 .../providers/agent/AgentClientProvider.java|  29 ++-
 .../slider/providers/agent/AgentKeys.java   |   3 +
 .../providers/agent/AgentProviderService.java   | 227 ++-
 .../slider/providers/agent/AgentUtils.java  |  16 ++
 .../providers/agent/ComponentCommandOrder.java  | 112 ++---
 .../server/appmaster/SliderAppMaster.java   |   6 +-
 .../slider/server/appmaster/state/AppState.java |   5 +
 .../appmaster/web/rest/agent/AgentResource.java |   4 +-
 16 files changed, 669 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3355401f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java
index 812a6b3..ce413ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/api/RoleKeys.java
@@ -35,6 +35,11 @@ public interface RoleKeys {
   String ROLE_GROUP = "role.group";
 
   /**
+   * The prefix of a role: {@value}
+   */
+  String ROLE_PREFIX = "role.prefix";
+
+  /**
* Status report: number actually granted : {@value} 
*/
   String ROLE_ACTUAL_INSTANCES = "role.actual.instances";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3355401f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
index d464ce0..3129f6f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/client/SliderClient.java
@@ -151,6 +151,7 @@ import org.apache.slider.core.registry.YarnAppListClient;
 import org.apache.slider.core.registry.docstore.ConfigFormat;
 import org.apache.slider.core.registry.docstore.PublishedConfigSet;
 import org.apache.slider.core.registry.docstore.PublishedConfiguration;
+import 
org.apache.slider.core.registry.docstore.PublishedConfigurationOutputter;
 import org.apache.slider.core.registry.docstore.PublishedExports;
 import org.apache.slider.core.registry.docstore.PublishedExportsOutputter;
 import org.apache.slider.core.registry.docstore.PublishedExportsSet;
@@ -724,7 +725,8 @@ public class SliderClient extends 
AbstractSliderLaunchedService implements RunSe
 AggregateConf instanceDefinition = loadInstanceDefinitionUnresolved(
 clustername, clusterDirectory);
 try {
-  checkForCredentials(getConfig(), instanceDefinition.getAppConf());
+  checkForCredentials(getConfig(), instanceDefinition.getAppConf(),
+  clustername);
 

[29/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionListArgs.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionListArgs.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionListArgs.java
new file mode 100644
index 000..739b5fc
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionListArgs.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.params;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import com.beust.jcommander.Parameter;
+import com.beust.jcommander.Parameters;
+
+@Parameters(commandNames = {SliderActions.ACTION_LIST},
+commandDescription = SliderActions.DESCRIBE_ACTION_LIST)
+
+public class ActionListArgs extends AbstractActionArgs {
+  @Override
+  public String getActionName() {
+return SliderActions.ACTION_LIST;
+  }
+
+  @Parameter(names = {ARG_LIVE},
+  description = "List only live application instances")
+  public boolean live;
+
+  @Parameter(names = {ARG_STATE},
+  description = "list only applications in the specific YARN state")
+  public String state = "";
+  
+  @Parameter(names = {ARG_VERBOSE},
+  description = "print out information in details")
+  public boolean verbose = false;
+
+  @Parameter(names = {ARG_CONTAINERS},
+  description = "List containers of an application instance")
+  public boolean containers;
+
+  @Parameter(names = {ARG_VERSION},
+  description = "Filter containers by app version (used with " +
+ARG_CONTAINERS + ")")
+  public String version;
+
+  @Parameter(names = {ARG_COMPONENTS}, variableArity = true,
+  description = "Filter containers by component names (used with " +
+ARG_CONTAINERS + ")")
+  public Set<String> components = new HashSet<>(0);
+
+  /**
+   * Get the min #of params expected
+   * @return the min number of params in the {@link #parameters} field
+   */
+  public int getMinParams() {
+return 0;
+  }
+
+  @Override
+  public int getMaxParams() {
+return 1;
+  }
+}
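These action argument classes are parsed by JCommander via the @Parameter annotations. A hedged sketch of a parse follows; the literal flag strings are guesses, since the ARG_* constants are defined elsewhere and their values are not shown in this excerpt.

import com.beust.jcommander.JCommander;
import org.apache.slider.common.params.ActionListArgs;

public class ListArgsParseSketch {
  public static void main(String[] args) {
    ActionListArgs listArgs = new ActionListArgs();
    JCommander jc = new JCommander(listArgs);
    // "--live" and "--containers" are assumed spellings of ARG_LIVE/ARG_CONTAINERS.
    jc.parse("--live", "--containers");
    System.out.println("live=" + listArgs.live
        + " containers=" + listArgs.containers);
  }
}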

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionLookupArgs.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionLookupArgs.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionLookupArgs.java
new file mode 100644
index 000..1b73522
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/params/ActionLookupArgs.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the 

[40/66] [abbrv] hadoop git commit: Modify pom file for slider

2016-11-23 Thread jianhe
Modify pom file for slider


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e89fad22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e89fad22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e89fad22

Branch: refs/heads/yarn-native-services
Commit: e89fad22486a224067de5d9d6b305a89aaa417d3
Parents: 3a74c2e
Author: Jian He 
Authored: Fri Aug 12 09:28:41 2016 +0800
Committer: Jian He 
Committed: Wed Nov 23 15:25:49 2016 -0800

--
 .../hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml | 2 +-
 .../hadoop-yarn-applications/hadoop-yarn-slider/pom.xml| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e89fad22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
index 14130c5..591a5ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
@@ -18,7 +18,7 @@
   4.0.0
   
 org.apache.hadoop
-hadoop-yarn-applications
+hadoop-yarn-slider
 3.0.0-alpha2-SNAPSHOT
   
   org.apache.hadoop

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e89fad22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
index ad374f8d..780f68b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
@@ -17,7 +17,7 @@
  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
   http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
-hadoop-yarn
+hadoop-yarn-applications
 org.apache.hadoop
 3.0.0-alpha2-SNAPSHOT
 





[56/66] [abbrv] hadoop git commit: YARN-5690. Integrate native services modules into maven build. Contributed by Billie Rinaldi

2016-11-23 Thread jianhe
YARN-5690. Integrate native services modules into maven build. Contributed by 
Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eda6e8cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eda6e8cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eda6e8cf

Branch: refs/heads/yarn-native-services
Commit: eda6e8cf2d0f159e3b05615d4a55cc73ac524d21
Parents: 8610dca
Author: Gour Saha 
Authored: Thu Oct 27 08:50:36 2016 -0700
Committer: Jian He 
Committed: Wed Nov 23 15:27:03 2016 -0800

--
 .../resources/assemblies/hadoop-yarn-dist.xml   | 26 
 .../assemblies/hadoop-yarn-services-api.xml | 36 +++
 .../assemblies/hadoop-yarn-slider-dist.xml  | 30 +
 hadoop-project/pom.xml  |  6 ++
 hadoop-yarn-project/hadoop-yarn/bin/yarn| 30 +
 .../hadoop-yarn-services-api/pom.xml| 44 +++--
 .../conf/slideram-log4j.properties  | 68 
 .../hadoop-yarn-slider-core/pom.xml | 38 +++
 .../org/apache/slider/client/SliderClient.java  | 29 +++--
 .../org/apache/slider/common/SliderKeys.java|  2 +-
 .../apache/slider/common/tools/SliderUtils.java | 48 ++
 .../providers/agent/AgentClientProvider.java|  3 +-
 .../slideram/SliderAMClientProvider.java| 15 +++--
 .../TestPublishedConfigurationOutputter.java| 10 ++-
 14 files changed, 343 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda6e8cf/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index c3f459c..552087c 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -86,6 +86,32 @@
   
 
 
+  
hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/target
+  
/share/hadoop/${hadoop.component}/sources
+  
+*-sources.jar
+  
+
+
+  
hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/conf
+  etc/hadoop
+
+
+  
hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/target/hadoop-yarn-slider-core-${project.version}
+  
/share/hadoop/${hadoop.component}/lib/slider
+
+
+  
hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/target
+  
/share/hadoop/${hadoop.component}/sources
+  
+*-sources.jar
+  
+
+
+  
hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/target/hadoop-yarn-services-api-${project.version}
+  
/share/hadoop/${hadoop.component}/lib/services-api
+
+
   
hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/target
   
/share/hadoop/${hadoop.component}/sources
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda6e8cf/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-services-api.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-services-api.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-services-api.xml
new file mode 100644
index 000..589f724
--- /dev/null
+++ 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-services-api.xml
@@ -0,0 +1,36 @@
+
+http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0;
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+  
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0
 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  hadoop-yarn-services-api-dist
+  
+dir
+  
+  false
+  
+
+  false
+  
+com.fasterxml.jackson.jaxrs:jackson-jaxrs-base
+
com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider
+
com.fasterxml.jackson.module:jackson-module-jaxb-annotations
+io.swagger:swagger-annotations
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda6e8cf/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-slider-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-slider-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-slider-dist.xml
new file mode 100644
index 000..5de45a9
--- /dev/null
+++ 

[07/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentEnv.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentEnv.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentEnv.java
new file mode 100644
index 000..781ae00
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/agent/AgentEnv.java
@@ -0,0 +1,376 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.agent;
+
+import com.google.gson.annotations.SerializedName;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class AgentEnv {
+
+  /**
+   * Various directories, configurable in ambari-agent.ini
+   */
+  private Directory[] stackFoldersAndFiles = new Directory[0];
+
+  /**
+   * Directories that match name /etc/alternatives/*conf
+   */
+  private Alternative[] alternatives = new Alternative[0];
+
+  /**
+   * List of existing users
+   */
+  private ExistingUser[] existingUsers = new ExistingUser[0];
+
+  /**
+   * List of repos
+   */
+  private String[] existingRepos = new String[0];
+
+  /**
+   * List of packages
+   */
+  private PackageDetail[] installedPackages = new PackageDetail[0];
+
+  /**
+   * The host health report
+   */
+  private HostHealth hostHealth = new HostHealth();
+
+  private Integer umask;
+
+  private Boolean iptablesIsRunning;
+
+  public Integer getUmask() {
+return umask;
+  }
+
+  public void setUmask(Integer umask) {
+this.umask = umask;
+  }
+
+  public Directory[] getStackFoldersAndFiles() {
+return stackFoldersAndFiles;
+  }
+
+  public void setStackFoldersAndFiles(Directory[] dirs) {
+stackFoldersAndFiles = dirs;
+  }
+
+  public void setExistingUsers(ExistingUser[] users) {
+existingUsers = users;
+  }
+
+  public ExistingUser[] getExistingUsers() {
+return existingUsers;
+  }
+
+  public void setAlternatives(Alternative[] dirs) {
+alternatives = dirs;
+  }
+
+  public Alternative[] getAlternatives() {
+return alternatives;
+  }
+
+  public void setExistingRepos(String[] repos) {
+existingRepos = repos;
+  }
+
+  public String[] getExistingRepos() {
+return existingRepos;
+  }
+
+  public void setInstalledPackages(PackageDetail[] packages) {
+installedPackages = packages;
+  }
+
+  public PackageDetail[] getInstalledPackages() {
+return installedPackages;
+  }
+
+  public void setHostHealth(HostHealth healthReport) {
+hostHealth = healthReport;
+  }
+
+  public HostHealth getHostHealth() {
+return hostHealth;
+  }
+
+  public Boolean getIptablesIsRunning() {
+return iptablesIsRunning;
+  }
+
+  public void setIptablesIsRunning(Boolean iptablesIsRunning) {
+this.iptablesIsRunning = iptablesIsRunning;
+  }
+
+  public static class HostHealth {
+/**
+ * Java processes running on the system.  Default empty array.
+ */
+@SerializedName("activeJavaProcs")
+private JavaProc[] activeJavaProcs = new JavaProc[0];
+
+/**
+ * The current time when agent send the host check report
+ */
+@SerializedName("agentTimeStampAtReporting")
+private long agentTimeStampAtReporting = 0;
+
+/**
+ * The current time when host check report was received
+ */
+@SerializedName("serverTimeStampAtReporting")
+private long serverTimeStampAtReporting = 0;
+
+/**
+ * Live services running on the agent
+ */
+@SerializedName("liveServices")
+private LiveService[] liveServices = new LiveService[0];
+
+   

[23/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/main/RunService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/main/RunService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/main/RunService.java
new file mode 100644
index 000..c3a1d0e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/main/RunService.java
@@ -0,0 +1,62 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.core.main;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
+
+/**
+ * An interface which services can implement to have their
+ * execution managed by the ServiceLauncher.
+ * The command line options will be passed down before the 
+ * {@link Service#init(Configuration)} operation is invoked via an
+ * invocation of {@link RunService#bindArgs(Configuration, String...)}
+ * After the service has been successfully started via {@link Service#start()}
+ * the {@link RunService#runService()} method is called to execute the 
+ * service. When this method returns, the service launcher will exit, using
+ * the return code from the method as its exit code.
+ */
+public interface RunService extends Service {
+
+  /**
+   * Propagate the command line arguments.
+   * This method is called before {@link Service#init(Configuration)};
+   * the configuration that is returned from this operation
+   * is the one that is passed on to the init operation.
+   * This permits implementations to change the configuration before
+   * the init operation.
+   * 
+   *
+   * @param config the initial configuration build up by the
+   * service launcher.
+   * @param args list of arguments passed to the command line
+   * after any launcher-specific commands have been stripped.
+   * @return the configuration to init the service with. This MUST NOT be null.
+   * Recommended: pass down the config parameter with any changes
+   * @throws Exception any problem
+   */
+  Configuration bindArgs(Configuration config, String... args) throws Exception;
+  
+  /**
+   * Run a service. This called after {@link Service#start()}
+   * @return the exit code
+   * @throws Throwable any exception to report
+   */
+  int runService() throws Throwable;
+}
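For orientation, the lifecycle described in the javadoc above (bindArgs before init, runService after start, the return value becoming the process exit code) could be exercised by a service such as the following. This is only an illustrative sketch: the class name EchoService and its behaviour are invented for this note and are not part of the ported code.

package org.apache.slider.core.main;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;

public class EchoService extends AbstractService implements RunService {

  private String[] args = new String[0];

  public EchoService() {
    super("EchoService");
  }

  @Override
  public Configuration bindArgs(Configuration config, String... args)
      throws Exception {
    // Invoked before init(); the returned configuration is the one init() receives.
    this.args = args;
    return config;
  }

  @Override
  public int runService() throws Throwable {
    // Invoked after start(); the return value becomes the launcher's exit code.
    for (String arg : args) {
      System.out.println(arg);
    }
    return 0;
  }
}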

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/main/ServiceLaunchException.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/main/ServiceLaunchException.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/main/ServiceLaunchException.java
new file mode 100644
index 000..27813b7
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/core/main/ServiceLaunchException.java
@@ -0,0 +1,73 @@
+/*
+ *  Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed 

[14/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
new file mode 100644
index 000..6b61681
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionFlexCluster.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.actions;
+
+import org.apache.slider.core.conf.ConfTree;
+import org.apache.slider.server.appmaster.SliderAppMaster;
+import org.apache.slider.server.appmaster.state.AppState;
+
+import java.util.concurrent.TimeUnit;
+
+public class ActionFlexCluster extends AsyncAction {
+
+  public final ConfTree resources;
+  
+  public ActionFlexCluster(String name,
+  long delay,
+  TimeUnit timeUnit, ConfTree resources) {
+super(name, delay, timeUnit, ATTR_CHANGES_APP_SIZE);
+this.resources = resources;
+  }
+
+  @Override
+  public void execute(SliderAppMaster appMaster,
+  QueueAccess queueService,
+  AppState appState) throws Exception {
+appMaster.flexCluster(resources);
+  }
+}
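The actions in this package follow a common pattern: the constructor forwards a name, delay and time unit to the AsyncAction base class, and execute() is invoked by the queue service once the delay has elapsed. As a hypothetical illustration only (the class below is invented for this write-up and is not part of the port), a trivial action that merely logs a message would look like:

package org.apache.slider.server.appmaster.actions;

import org.apache.slider.server.appmaster.SliderAppMaster;
import org.apache.slider.server.appmaster.state.AppState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.TimeUnit;

public class ActionLogMessage extends AsyncAction {

  private static final Logger log =
      LoggerFactory.getLogger(ActionLogMessage.class);

  private final String message;

  public ActionLogMessage(String message, long delay, TimeUnit timeUnit) {
    // name, delay and time unit are handled entirely by the AsyncAction base class
    super("LogMessage", delay, timeUnit);
    this.message = message;
  }

  @Override
  public void execute(SliderAppMaster appMaster,
      QueueAccess queueService,
      AppState appState) throws Exception {
    // No cluster state is touched; the action simply records that it ran.
    log.info(message);
  }
}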

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionHalt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionHalt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionHalt.java
new file mode 100644
index 000..e2ad559
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/actions/ActionHalt.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster.actions;
+
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.slider.server.appmaster.SliderAppMaster;
+import org.apache.slider.server.appmaster.state.AppState;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Trigger an emergency JVM halt.
+ * @see ExitUtil#halt(int, String) 
+ */
+public class ActionHalt extends AsyncAction {
+
+  private final int status;
+  private final String text;
+
+  public ActionHalt(
+  int status,
+  String text,
+  long delay, TimeUnit timeUnit) {
+
+// do not declare that this action halts the cluster ... keep it a surprise
+super("Halt", delay, timeUnit);
+this.status = status;
+this.text = 

[05/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
new file mode 100644
index 000..a8e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ComponentResource.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.management.resources;
+
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import javax.ws.rs.core.UriBuilder;
+import java.util.Map;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class ComponentResource {
+  private final Map props;
+  private String href;
+
+  public ComponentResource() {
+this(null, null, null, null);
+  }
+
+  public ComponentResource(String name,
+   Map props,
+   UriBuilder uriBuilder,
+   Map pathElems) {
+this.props = props;
+  }
+
+  public Map getProps() {
+return props;
+  }
+
+  public String getHref() {
+return href;
+  }
+
+  public void setHref(String href) {
+this.href = href;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
new file mode 100644
index 000..407bab6
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/web/rest/management/resources/ConfTreeResource.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.server.appmaster.web.rest.management.resources;
+
+import org.apache.slider.core.conf.ConfTree;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import javax.ws.rs.core.UriBuilder;
+import java.util.Map;
+

[27/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
new file mode 100644
index 000..aa5edf1
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/common/tools/CoreFileSystem.java
@@ -0,0 +1,915 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.common.tools;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.slider.common.SliderExitCodes;
+import org.apache.slider.common.SliderKeys;
+import org.apache.slider.common.SliderXmlConfKeys;
+import org.apache.slider.core.exceptions.BadClusterStateException;
+import org.apache.slider.core.exceptions.ErrorStrings;
+import org.apache.slider.core.exceptions.SliderException;
+import org.apache.slider.core.exceptions.UnknownApplicationInstanceException;
+import org.apache.slider.core.persist.Filenames;
+import org.apache.slider.core.persist.InstancePaths;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Enumeration;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+import static org.apache.slider.common.SliderXmlConfKeys.CLUSTER_DIRECTORY_PERMISSIONS;
+import static org.apache.slider.common.SliderXmlConfKeys.DEFAULT_CLUSTER_DIRECTORY_PERMISSIONS;
+
+public class CoreFileSystem {
+  private static final Logger
+log = LoggerFactory.getLogger(CoreFileSystem.class);
+
+  private static final String UTF_8 = "UTF-8";
+
+  protected final FileSystem fileSystem;
+  protected final Configuration configuration;
+
+  public CoreFileSystem(FileSystem fileSystem, Configuration configuration) {
+Preconditions.checkNotNull(fileSystem,
+   "Cannot create a CoreFileSystem with a null 
FileSystem");
+Preconditions.checkNotNull(configuration,
+   "Cannot create a CoreFileSystem with a null 
Configuration");
+this.fileSystem = fileSystem;
+this.configuration = configuration;
+  }
+
+  public CoreFileSystem(Configuration configuration) throws IOException {
+Preconditions.checkNotNull(configuration,
+   "Cannot create a CoreFileSystem with a null 
Configuration");
+this.fileSystem = FileSystem.get(configuration);
+this.configuration = fileSystem.getConf();
+  }
+  
+  /**
+   * Get the temp path for this cluster
+   * @param clustername name of the cluster
+   * @return the path for temp files (the directory is not purged)
+   */
+  public Path getTempPathForCluster(String clustername) {
+Path clusterDir = buildClusterDirPath(clustername);
+return new Path(clusterDir, SliderKeys.TMP_DIR_PREFIX);
+  }
+
+  /**
+   * Returns the 
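As a usage illustration only — the class name and the cluster name below are invented and are not part of the patch — the helper above could be driven like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.slider.common.tools.CoreFileSystem;

public class TempPathDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Binds to the default FileSystem of the configuration, as in the
    // CoreFileSystem(Configuration) constructor shown above.
    CoreFileSystem coreFS = new CoreFileSystem(conf);
    // Temp directory under the cluster directory; it is not purged automatically.
    Path tempPath = coreFS.getTempPathForCluster("demo-cluster");
    System.out.println("temp path = " + tempPath);
  }
}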

[18/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentRoles.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentRoles.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentRoles.java
new file mode 100644
index 000..281895a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentRoles.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.slider.providers.agent;
+
+import org.apache.slider.providers.ProviderRole;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class AgentRoles {
+
+  /**
+   * List of roles. The Agent provider does not have any roles by default;
+   * all roles are read from the application specification.
+   */
+  protected static final List<ProviderRole> ROLES =
+  new ArrayList<ProviderRole>();
+
+  public static List<ProviderRole> getRoles() {
+return ROLES;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentUtils.java
new file mode 100644
index 000..cfcfc5d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/providers/agent/AgentUtils.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.slider.providers.agent;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.slider.common.tools.SliderFileSystem;
+import org.apache.slider.common.tools.SliderUtils;
+import org.apache.slider.core.exceptions.BadConfigException;
+import org.apache.slider.providers.agent.application.metadata.AbstractMetainfoParser;
+import org.apache.slider.providers.agent.application.metadata.AddonPackageMetainfoParser;
+import org.apache.slider.providers.agent.application.metadata.DefaultConfig;
+import org.apache.slider.providers.agent.application.metadata.DefaultConfigParser;
+import org.apache.slider.providers.agent.application.metadata.Metainfo;
+import org.apache.slider.providers.agent.application.metadata.MetainfoParser;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ *
+ */
+public class AgentUtils {
+  private static final Logger log = LoggerFactory.getLogger(AgentUtils.class);
+
+  

[15/66] [abbrv] hadoop git commit: YARN-5461. Initial code ported from slider-core module. (jianhe)

2016-11-23 Thread jianhe
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09ee280b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
new file mode 100644
index 000..b767059
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -0,0 +1,2450 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ *  or more contributor license agreements.  See the NOTICE file
+ *  distributed with this work for additional information
+ *  regarding copyright ownership.  The ASF licenses this file
+ *  to you under the Apache License, Version 2.0 (the
+ *  "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.slider.server.appmaster;
+
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.health.HealthCheckRegistry;
+import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
+import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
+import com.codahale.metrics.jvm.ThreadStatesGaugeSet;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
+
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.registry.client.binding.RegistryTypeUtils;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.service.ServiceOperations;
+import org.apache.hadoop.service.ServiceStateChangeListener;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
+import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
+import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.*;
+import static org.apache.slider.common.Constants.HADOOP_JAAS_DEBUG;
+
+import org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.registry.client.api.RegistryOperations;
+import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;

hadoop git commit: HADOOP-13801 regression: ITestS3AMiniYarnCluster failing. Contributed by Steve Loughran

2016-11-23 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 834e2ddba -> 01e3b0dae


HADOOP-13801 regression: ITestS3AMiniYarnCluster failing. Contributed by Steve 
Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01e3b0da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01e3b0da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01e3b0da

Branch: refs/heads/branch-2.8
Commit: 01e3b0dae8c38478a1a9fec1bad948e9720f5b50
Parents: 834e2dd
Author: Steve Loughran 
Authored: Wed Nov 23 21:37:04 2016 +
Committer: Steve Loughran 
Committed: Wed Nov 23 21:37:23 2016 +

--
 .../hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java  | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01e3b0da/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
index 8421dad..00bd866 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
@@ -24,13 +24,14 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.examples.WordCount;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3ATestUtils;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -137,8 +138,12 @@ public class ITestS3AMiniYarnCluster extends 
AbstractS3ATestBase {
* helper method.
*/
   private String readStringFromFile(Path path) throws IOException {
-return ContractTestUtils.readBytesToString(fs, path,
-(int) fs.getFileStatus(path).getLen());
+try (FSDataInputStream in = fs.open(path)) {
+  long bytesLen = fs.getFileStatus(path).getLen();
+  byte[] buffer = new byte[(int) bytesLen];
+  IOUtils.readFully(in, buffer, 0, buffer.length);
+  return new String(buffer);
+}
   }
 
 }
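The replacement above reads the whole file through an FSDataInputStream and converts the bytes with the platform default charset. A standalone variant of the same pattern — the helper class name and the explicit UTF-8 charset are assumptions for illustration, not part of the commit — could be written as:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public final class FsTextUtils {

  private FsTextUtils() {
  }

  /** Read the file at the given path fully into a string. */
  public static String readStringFromFile(FileSystem fs, Path path)
      throws IOException {
    try (FSDataInputStream in = fs.open(path)) {
      byte[] buffer = new byte[(int) fs.getFileStatus(path).getLen()];
      IOUtils.readFully(in, buffer, 0, buffer.length);
      return new String(buffer, StandardCharsets.UTF_8);
    }
  }
}

The try-with-resources block guarantees the stream is closed even if the read fails, which is the behaviour the patch relies on as well.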





hadoop git commit: HADOOP-13801 regression: ITestS3AMiniYarnCluster failing. Contributed by Steve Loughran

2016-11-23 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2d068f179 -> f3fff5c2e


HADOOP-13801 regression: ITestS3AMiniYarnCluster failing. Contributed by Steve 
Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3fff5c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3fff5c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3fff5c2

Branch: refs/heads/branch-2
Commit: f3fff5c2e1c8fcd9766f9987f1a55e0981d71168
Parents: 2d068f1
Author: Steve Loughran 
Authored: Wed Nov 23 21:37:04 2016 +
Committer: Steve Loughran 
Committed: Wed Nov 23 21:37:04 2016 +

--
 .../hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java  | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fff5c2/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
index b74ab52..f99de7f 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
@@ -24,13 +24,14 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.examples.WordCount;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3ATestUtils;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -140,8 +141,12 @@ public class ITestS3AMiniYarnCluster extends 
AbstractS3ATestBase {
* helper method.
*/
   private String readStringFromFile(Path path) throws IOException {
-return ContractTestUtils.readBytesToString(fs, path,
-(int) fs.getFileStatus(path).getLen());
+try (FSDataInputStream in = fs.open(path)) {
+  long bytesLen = fs.getFileStatus(path).getLen();
+  byte[] buffer = new byte[(int) bytesLen];
+  IOUtils.readFully(in, buffer, 0, buffer.length);
+  return new String(buffer);
+}
   }
 
 }





hadoop git commit: HADOOP-13801 regression: ITestS3AMiniYarnCluster failing. Contributed by Steve Loughran

2016-11-23 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 005850b28 -> 0de0c32dd


HADOOP-13801 regression: ITestS3AMiniYarnCluster failing. Contributed by Steve 
Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0de0c32d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0de0c32d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0de0c32d

Branch: refs/heads/trunk
Commit: 0de0c32ddd46eaf42198dbf24ec4344f6810ca09
Parents: 005850b
Author: Steve Loughran 
Authored: Wed Nov 23 21:37:04 2016 +
Committer: Steve Loughran 
Committed: Wed Nov 23 21:37:39 2016 +

--
 .../hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java  | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0de0c32d/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
index d8ae313..6db4eba 100644
--- 
a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
+++ 
b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/yarn/ITestS3AMiniYarnCluster.java
@@ -24,13 +24,14 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.examples.WordCount;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3ATestUtils;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -140,8 +141,12 @@ public class ITestS3AMiniYarnCluster extends 
AbstractS3ATestBase {
* helper method.
*/
   private String readStringFromFile(Path path) throws IOException {
-return ContractTestUtils.readBytesToString(fs, path,
-(int) fs.getFileStatus(path).getLen());
+try (FSDataInputStream in = fs.open(path)) {
+  long bytesLen = fs.getFileStatus(path).getLen();
+  byte[] buffer = new byte[(int) bytesLen];
+  IOUtils.readFully(in, buffer, 0, buffer.length);
+  return new String(buffer);
+}
   }
 
 }





hadoop git commit: YARN-5865. Retrospect updateApplicationPriority api to handle state store exception in align with YARN-5611. Contributed by Sunil G.

2016-11-23 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2d94f938c -> 2d068f179


YARN-5865. Retrospect updateApplicationPriority api to handle state store 
exception in align with YARN-5611. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d068f17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d068f17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d068f17

Branch: refs/heads/branch-2
Commit: 2d068f179390da79b28a3f2aa28cf8f1838b7085
Parents: 2d94f93
Author: Rohith Sharma K S 
Authored: Wed Nov 23 23:29:39 2016 +0530
Committer: Rohith Sharma K S 
Committed: Wed Nov 23 23:29:39 2016 +0530

--
 .../ApplicationMasterService.java   |  2 +-
 .../server/resourcemanager/ClientRMService.java | 10 ++--
 .../server/resourcemanager/RMAppManager.java| 49 ++--
 .../metrics/SystemMetricsPublisher.java |  3 +-
 .../server/resourcemanager/rmapp/RMApp.java | 14 ++
 .../server/resourcemanager/rmapp/RMAppImpl.java | 37 ---
 .../scheduler/AbstractYarnScheduler.java|  6 ++-
 .../scheduler/YarnScheduler.java| 11 -
 .../scheduler/capacity/CapacityScheduler.java   | 28 +--
 .../scheduler/event/AppAddedSchedulerEvent.java |  5 +-
 .../resourcemanager/webapp/RMWebServices.java   |  6 +--
 .../resourcemanager/webapp/dao/AppInfo.java |  9 ++--
 .../server/resourcemanager/TestAppManager.java  |  1 +
 .../TestApplicationMasterService.java   |  6 +--
 .../resourcemanager/TestClientRMService.java| 13 --
 .../applicationsmanager/MockAsm.java| 11 +
 .../metrics/TestSystemMetricsPublisher.java |  2 +
 .../server/resourcemanager/rmapp/MockRMApp.java | 10 
 .../rmapp/TestRMAppTransitions.java |  2 +
 .../capacity/TestApplicationPriority.java   | 13 +++---
 20 files changed, 177 insertions(+), 61 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d068f17/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index 888bea9..108c327 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -604,7 +604,7 @@ public class ApplicationMasterService extends 
AbstractService implements
 
   // Set application priority
   allocateResponse.setApplicationPriority(app
-  .getApplicationSubmissionContext().getPriority());
+  .getApplicationPriority());
 
   // update AMRMToken if the token is rolled-up
   MasterKeyData nextMasterKey =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d068f17/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 6690cd8..21a9d13 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1584,14 +1584,14 @@ public class ClientRMService extends AbstractService 
implements
 .newRecordInstance(UpdateApplicationPriorityResponse.class);
 // Update priority only when app is tracked by the scheduler
 if (!ACTIVE_APP_STATES.contains(application.getState())) {
-  if 

hadoop git commit: YARN-5918. Handle Opportunistic scheduling allocate request failure when NM is lost. (Bibin A Chundatt via asuresh)

2016-11-23 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3541ed806 -> 005850b28


YARN-5918. Handle Opportunistic scheduling allocate request failure when NM is 
lost. (Bibin A Chundatt via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/005850b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/005850b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/005850b2

Branch: refs/heads/trunk
Commit: 005850b28feb2f7bb8c2844d11e3f9d21b45d754
Parents: 3541ed8
Author: Arun Suresh 
Authored: Wed Nov 23 09:53:31 2016 -0800
Committer: Arun Suresh 
Committed: Wed Nov 23 09:53:31 2016 -0800

--
 ...pportunisticContainerAllocatorAMService.java | 13 ++-
 .../distributed/NodeQueueLoadMonitor.java   |  8 +-
 ...pportunisticContainerAllocatorAMService.java | 97 +++-
 3 files changed, 110 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/005850b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
index d3db96f..7814b84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java
@@ -57,6 +57,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.NodeQueueLoadMonitor;
 
@@ -409,15 +410,19 @@ public class OpportunisticContainerAllocatorAMService
   private List<RemoteNode> convertToRemoteNodes(List<NodeId> nodeIds) {
 ArrayList<RemoteNode> retNodes = new ArrayList<>();
 for (NodeId nId : nodeIds) {
-  retNodes.add(convertToRemoteNode(nId));
+  RemoteNode remoteNode = convertToRemoteNode(nId);
+  if (null != remoteNode) {
+retNodes.add(remoteNode);
+  }
 }
 return retNodes;
   }
 
   private RemoteNode convertToRemoteNode(NodeId nodeId) {
-return RemoteNode.newInstance(nodeId,
-((AbstractYarnScheduler)rmContext.getScheduler()).getNode(nodeId)
-.getHttpAddress());
+SchedulerNode node =
+((AbstractYarnScheduler) rmContext.getScheduler()).getNode(nodeId);
+return node != null ? RemoteNode.newInstance(nodeId, node.getHttpAddress())
+: null;
   }
 
   private Resource createMaxContainerResource() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/005850b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
index 232b4ad..dec55ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/distributed/NodeQueueLoadMonitor.java
@@ -165,9 +165,11 @@ public class 

hadoop git commit: YARN-4330. MiniYARNCluster is showing multiple Failed to instantiate default resource calculator warning messages. Contributed by Varun Saxena

2016-11-23 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b96ed4389 -> 2d94f938c


YARN-4330. MiniYARNCluster is showing multiple  Failed to instantiate default 
resource calculator warning messages. Contributed by Varun Saxena

(cherry picked from commit 3541ed80685f25486f33ef0f553854ccbdeb51d4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d94f938
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d94f938
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d94f938

Branch: refs/heads/branch-2
Commit: 2d94f938c5f68a44402cee84146413db95853227
Parents: b96ed43
Author: Naganarasimha 
Authored: Wed Nov 23 14:12:23 2016 +0530
Committer: Naganarasimha 
Committed: Wed Nov 23 14:14:56 2016 +0530

--
 .../yarn/util/ResourceCalculatorPlugin.java |   3 +
 .../src/main/resources/yarn-default.xml |   6 +-
 .../nodemanager/NodeResourceMonitorImpl.java|   5 +
 .../monitor/ContainersMonitorImpl.java  |   3 +-
 .../util/NodeManagerHardwareUtils.java  | 160 +++
 .../hadoop/yarn/server/MiniYARNCluster.java |   7 +-
 6 files changed, 110 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d94f938/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index e7e4c8a..fd63d98 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -190,6 +190,9 @@ public class ResourceCalculatorPlugin extends Configured {
 }
 try {
   return new ResourceCalculatorPlugin();
+} catch (UnsupportedOperationException ue) {
+  LOG.warn("Failed to instantiate default resource calculator. "
+  + ue.getMessage());
 } catch (Throwable t) {
   LOG.warn(t + ": Failed to instantiate default resource calculator.", t);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d94f938/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 3efbd2a..b4da335 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1310,7 +1310,8 @@
   
 
   
-How often to monitor the node and the 
containers.
+How often to monitor the node and the containers.
+If 0 or negative, monitoring is disabled.
 yarn.nodemanager.resource-monitor.interval-ms
 3000
   
@@ -1328,7 +1329,8 @@
 
   
 How often to monitor containers. If not set, the value for
-yarn.nodemanager.resource-monitor.interval-ms will be used.
+yarn.nodemanager.resource-monitor.interval-ms will be used.
+If 0 or negative, container monitoring is disabled.
 yarn.nodemanager.container-monitor.interval-ms
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d94f938/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
index 8ee27de..e1116da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
@@ -78,6 +78,11 @@ public class NodeResourceMonitorImpl extends AbstractService 
implements
* @return true if we can 

hadoop git commit: YARN-4330. MiniYARNCluster is showing multiple Failed to instantiate default resource calculator warning messages. Contributed by Varun Saxena

2016-11-23 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/trunk 466756416 -> 3541ed806


YARN-4330. MiniYARNCluster is showing multiple  Failed to instantiate default 
resource calculator warning messages. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3541ed80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3541ed80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3541ed80

Branch: refs/heads/trunk
Commit: 3541ed80685f25486f33ef0f553854ccbdeb51d4
Parents: 4667564
Author: Naganarasimha 
Authored: Wed Nov 23 14:12:23 2016 +0530
Committer: Naganarasimha 
Committed: Wed Nov 23 14:12:23 2016 +0530

--
 .../yarn/util/ResourceCalculatorPlugin.java |   3 +
 .../src/main/resources/yarn-default.xml |   6 +-
 .../nodemanager/NodeResourceMonitorImpl.java|   5 +
 .../monitor/ContainersMonitorImpl.java  |   3 +-
 .../util/NodeManagerHardwareUtils.java  | 160 +++
 .../hadoop/yarn/server/MiniYARNCluster.java |   7 +-
 6 files changed, 110 insertions(+), 74 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3541ed80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
index e7e4c8a..fd63d98 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ResourceCalculatorPlugin.java
@@ -190,6 +190,9 @@ public class ResourceCalculatorPlugin extends Configured {
 }
 try {
   return new ResourceCalculatorPlugin();
+} catch (UnsupportedOperationException ue) {
+  LOG.warn("Failed to instantiate default resource calculator. "
+  + ue.getMessage());
 } catch (Throwable t) {
   LOG.warn(t + ": Failed to instantiate default resource calculator.", t);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3541ed80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index c436289..47d12d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1344,7 +1344,8 @@
   
 
   
-How often to monitor the node and the 
containers.
+How often to monitor the node and the containers.
+If 0 or negative, monitoring is disabled.
 yarn.nodemanager.resource-monitor.interval-ms
 3000
   
@@ -1362,7 +1363,8 @@
 
   
 How often to monitor containers. If not set, the value for
-yarn.nodemanager.resource-monitor.interval-ms will be used.
+yarn.nodemanager.resource-monitor.interval-ms will be used.
+If 0 or negative, container monitoring is disabled.
 yarn.nodemanager.container-monitor.interval-ms
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3541ed80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
index 8ee27de..e1116da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeResourceMonitorImpl.java
@@ -78,6 +78,11 @@ public class NodeResourceMonitorImpl extends AbstractService 
implements
* @return true if we can monitor the node resource utilization.
*/
   private boolean isEnabled() 

hadoop git commit: YARN-5911. DrainDispatcher does not drain all events on stop even if setDrainEventsOnStop is true. Contributed by Varun Saxena.

2016-11-23 Thread naganarasimha_gr
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c11e0ef62 -> b96ed4389


YARN-5911. DrainDispatcher does not drain all events on stop even if 
setDrainEventsOnStop is true. Contributed by Varun Saxena.

(cherry picked from commit 466756416214a4bbc77af8a29da1a33e01106864)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b96ed438
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b96ed438
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b96ed438

Branch: refs/heads/branch-2
Commit: b96ed4389701b0b03d5069f25c67f36affb86fc9
Parents: c11e0ef
Author: Naganarasimha 
Authored: Wed Nov 23 08:44:58 2016 +0530
Committer: Naganarasimha 
Committed: Wed Nov 23 09:36:43 2016 +0530

--
 .../hadoop/yarn/event/AsyncDispatcher.java  |  6 ++-
 .../hadoop/yarn/event/DrainDispatcher.java  |  9 +
 .../hadoop/yarn/event/TestAsyncDispatcher.java  | 42 
 3 files changed, 48 insertions(+), 9 deletions(-)
--
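Before the per-file diffs, a short note on what the fix guarantees: with drain-on-stop requested, stopping the dispatcher must not let the handler thread exit while events are still queued. The following is a minimal, self-contained sketch of that contract (a simplified stand-in, not the real AsyncDispatcher/DrainDispatcher; every name in it is made up for illustration):

// Simplified, self-contained sketch of the drain-on-stop contract (not the
// real AsyncDispatcher/DrainDispatcher): with drainEventsOnStop set, stop()
// lets the handler thread consume every queued event before the thread exits.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class DrainOnStopSketch {
  private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
  private final boolean drainEventsOnStop;
  private volatile boolean stopped = false;
  private final Thread handler;

  DrainOnStopSketch(boolean drainEventsOnStop) {
    this.drainEventsOnStop = drainEventsOnStop;
    this.handler = new Thread(() -> {
      // Keep handling while running; once stopped, keep going only until the
      // queue is empty, and only if draining on stop was requested.
      while (!stopped || (this.drainEventsOnStop && !queue.isEmpty())) {
        try {
          Runnable event = queue.poll(10, TimeUnit.MILLISECONDS);
          if (event != null) {
            event.run();
          }
        } catch (InterruptedException e) {
          return;
        }
      }
    });
    handler.start();
  }

  void dispatch(Runnable event) {
    queue.add(event);
  }

  void stop() throws InterruptedException {
    stopped = true;   // the handler thread decides whether to drain first
    handler.join();
  }

  public static void main(String[] args) throws Exception {
    DrainOnStopSketch dispatcher = new DrainOnStopSketch(true);
    AtomicInteger handled = new AtomicInteger();
    for (int i = 0; i < 1000; i++) {
      dispatcher.dispatch(() -> handled.incrementAndGet());
    }
    dispatcher.stop();
    // With drain-on-stop, every queued event must have been handled.
    System.out.println("handled = " + handled.get() + " of 1000");
  }
}

In the patched code this guarantee comes from AsyncDispatcher itself; as the DrainDispatcher.java hunks below show, the test dispatcher no longer keeps a separate stopped flag of its own.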


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b96ed438/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index 42a6819..94bfab6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -151,7 +151,7 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
 while (!isDrained() && eventHandlingThread != null
 && eventHandlingThread.isAlive()
 && System.currentTimeMillis() < endTime) {
-  waitForDrained.wait(1000);
+  waitForDrained.wait(100);
   LOG.info("Waiting for AsyncDispatcher to drain. Thread state is :" +
   eventHandlingThread.getState());
 }
@@ -308,4 +308,8 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
   protected boolean isDrained() {
 return drained;
   }
+
+  protected boolean isStopped() {
+return stopped;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b96ed438/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
index 1369465..c5ba072 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/DrainDispatcher.java
@@ -25,7 +25,6 @@ import java.util.concurrent.LinkedBlockingQueue;
 @SuppressWarnings("rawtypes")
 public class DrainDispatcher extends AsyncDispatcher {
   private volatile boolean drained = false;
-  private volatile boolean stopped = false;
   private final BlockingQueue queue;
   private final Object mutex;
 
@@ -69,7 +68,7 @@ public class DrainDispatcher extends AsyncDispatcher {
 return new Runnable() {
   @Override
   public void run() {
-while (!stopped && !Thread.currentThread().isInterrupted()) {
+while (!isStopped() && !Thread.currentThread().isInterrupted()) {
   synchronized (mutex) {
 // !drained if dispatch queued new events on this dispatcher
 drained = queue.isEmpty();
@@ -109,10 +108,4 @@ public class DrainDispatcher extends AsyncDispatcher {
   return drained;
 }
   }
-
-  @Override
-  protected void serviceStop() throws Exception {
-stopped = true;
-super.serviceStop();
-  }
 }
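As the hunks above show, DrainDispatcher no longer keeps its own volatile stopped flag or overrides serviceStop(); its dispatch loop now consults the base class through the new isStopped() accessor, so the stopped/drained bookkeeping has a single source of truth. For reference, a hedged usage sketch of how a test might drive a DrainDispatcher (assumed usage written for illustration, not code from this patch; DemoEventType and DemoEvent are made-up names, while the DrainDispatcher/AsyncDispatcher methods used -- register, getEventHandler, await, init, start, stop -- are the existing YARN APIs):

// Hedged usage sketch (assumed, not taken from this patch): events are fired
// through a DrainDispatcher and await() blocks until the queue is drained, so
// any check that follows it sees the effect of every dispatched event.
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;

public class DrainDispatcherUsageSketch {

  // Hypothetical event type and event class, defined only for this example.
  enum DemoEventType { PING }

  static class DemoEvent extends AbstractEvent<DemoEventType> {
    DemoEvent() {
      super(DemoEventType.PING);
    }
  }

  public static void main(String[] args) {
    DrainDispatcher dispatcher = new DrainDispatcher();
    AtomicInteger handled = new AtomicInteger();

    dispatcher.init(new Configuration());
    // Route every DemoEventType event to a handler that just counts it.
    dispatcher.register(DemoEventType.class,
        (EventHandler<DemoEvent>) event -> handled.incrementAndGet());
    dispatcher.start();

    for (int i = 0; i < 100; i++) {
      dispatcher.getEventHandler().handle(new DemoEvent());
    }

    dispatcher.await();  // blocks until the dispatcher's queue is drained
    dispatcher.stop();

    System.out.println("handled = " + handled.get() + " of 100");
  }
}

await() spins until the dispatcher reports its queue drained, which is what makes the check after it reliable; drain-on-stop gives the same guarantee at service shutdown.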

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b96ed438/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
index 018096b..2b9d745 100644
---