Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services f04eb0208 -> c22be358a (forced update)


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
new file mode 100644
index 0000000..b8bdc59
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto
@@ -0,0 +1,396 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.slider.api.proto";
+option java_outer_classname = "Messages";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package org.apache.slider.api;
+
+//import "Security.proto";
+
+/*
+  Look at SliderClusterProtocol.proto to see how to build this
+*/
+
+message RoleInstanceState {
+  required string name =        1;
+  optional string role =        2;
+  required uint32 state =       4;
+  required uint32 exitCode =    5;
+  optional string command =     6;
+  optional string diagnostics = 7;
+  repeated string output =      8;
+  repeated string environment = 9;
+  required uint32 roleId =     10;
+  required bool released =     11;
+  required int64 createTime =  12;
+  required int64 startTime =   13;
+  required string host =       14;
+  required string hostURL =    15;
+  optional string appVersion = 16;
+}
+
+/**
+ * stop the cluster
+ */
+message StopClusterRequestProto {
+  /**
+  message to include
+  */
+  required string message = 1;
+}
+
+/**
+ * stop the cluster
+ */
+message StopClusterResponseProto {
+}
+
+/**
+ * upgrade the containers
+ */
+message UpgradeContainersRequestProto {
+  /**
+  message to include
+  */
+  required string message =     1;
+  repeated string container =   2;
+  repeated string component =   3;
+}
+
+/**
+ * upgrade the containers
+ */
+message UpgradeContainersResponseProto {
+}
+
+/**
+ * flex the cluster
+ */
+message FlexClusterRequestProto {
+  required string clusterSpec = 1;
+}
+
+
+/**
+ * flex the cluster
+ */
+message FlexClusterResponseProto {
+  required bool response = 1;
+}
+
+
+/**
+ * void request
+ */
+message GetJSONClusterStatusRequestProto {
+}
+
+/**
+ * response
+ */
+message GetJSONClusterStatusResponseProto {
+  required string clusterSpec = 1;
+}
+
+/**
+ * list the nodes in a role
+ */
+message ListNodeUUIDsByRoleRequestProto {
+  required string role = 1;
+}
+
+/**
+ * list the nodes in a role
+ */
+message ListNodeUUIDsByRoleResponseProto {
+  repeated string uuid = 1 ;
+}
+
+/**
+ * get a node
+ */
+message GetNodeRequestProto {
+  required string uuid = 1;
+}
+
+
+/**
+ * response on a node
+ */
+message GetNodeResponseProto {
+   required RoleInstanceState clusterNode = 1 ;
+}
+
+/**
+ * list the nodes for the UUIDs
+ */
+message GetClusterNodesRequestProto {
+  repeated string uuid = 1 ;
+}
+
+/**
+ * response with the details of the requested nodes
+ */
+message GetClusterNodesResponseProto {
+  repeated RoleInstanceState clusterNode = 1 ;
+}
+
+/**
+ * Echo
+ */
+message EchoRequestProto {
+  required string text = 1;
+}
+
+/**
+ * Echo reply
+ */
+message EchoResponseProto {
+  required string text = 1;
+}
+
+
+/**
+ * Kill a container
+ */
+message KillContainerRequestProto {
+  required string id = 1;
+}
+
+/**
+ * Kill reply
+ */
+message KillContainerResponseProto {
+  required bool success = 1;
+}
+
+/**
+ * AM suicide
+ */
+message AMSuicideRequestProto {
+  required string text =      1;
+  required int32 signal =     2;
+  required int32 delay =      3;
+}
+
+/**
+ * AM suicide reply. If this is ever returned, it implies
+ * that the AM failed to kill itself
+ */
+message AMSuicideResponseProto {
+
+}
+
+
+/**
+ * Ask for the instance definition details
+ */
+message GetInstanceDefinitionRequestProto {
+
+}
+
+/**
+ * Get the definition back as three separate JSON strings
+ */
+message GetInstanceDefinitionResponseProto {
+  required string internal =        1;
+  required string resources =       2;
+  required string application =     3;
+}
+
+
+  /* ************************************************************************
+  
+  REST model and operations.
+  Below here are the operations and payloads designed to mimic
+  the REST API. That API is now the source of these
+  specifications; this is simply a derivative.
+  
+  **************************************************************************/
+
+/**
+ * See org.apache.slider.api.types.ApplicationLivenessInformation
+ */
+message ApplicationLivenessInformationProto {
+  optional bool allRequestsSatisfied = 1;
+  optional int32 requestsOutstanding = 2;
+}
+
+/*
+ * see org.apache.slider.api.types.ComponentInformation
+ */
+message ComponentInformationProto {
+  optional string name =           1;
+  optional int32 priority =        2;
+  optional int32 desired =         3;
+  optional int32 actual =          4;
+  optional int32 releasing =       5;
+  optional int32 requested =       6;
+  optional int32 failed =          7;
+  optional int32 started =         8;
+  optional int32 startFailed =     9;
+  optional int32 completed =      10;
+  optional int32 totalRequested = 11;
+  optional string failureMessage =12;
+  optional int32 placementPolicy =13;
+  repeated string containers =    14;
+  optional int32 failedRecently = 15;
+  optional int32 nodeFailed =     16;
+  optional int32 preempted =      17;
+  optional int32 pendingAntiAffineRequestCount = 18;
+  optional bool isAARequestOutstanding = 19;
+}
+
+/*
+ * see org.apache.slider.api.types.ContainerInformation
+ */
+message ContainerInformationProto {
+  optional string containerId =   1;
+  optional string component =     2;
+  optional bool released =        3;
+  optional int32 state =          4;
+  optional int32 exitCode =       5;
+  optional string diagnostics =   6;
+  optional int64 createTime =     7;
+  optional int64 startTime =      8;
+  repeated string output =        9;
+  optional string host =         10;
+  optional string hostURL =      11;
+  optional string placement =    12;
+  optional string appVersion =   13;
+}
+
+
+/*
+ * see org.apache.slider.api.types.PingInformation
+ */
+message PingInformationProto {
+  optional string text = 1;
+  optional string verb = 2;
+  optional string body = 3;
+  optional int64 time =  4;
+}
+
+message NodeEntryInformationProto {
+  required int32 priority =      1;
+  required int32 requested =     2;
+  required int32 starting =      3;
+  required int32 startFailed =   4;
+  required int32 failed =        5;
+  required int32 failedRecently= 6;
+  required int32 preempted =     7;
+  required int32 live =          8;
+  required int32 releasing =     9;
+  required int64 lastUsed =     10;
+  required string name =        11;
+}
+
+message NodeInformationProto {
+  required string hostname =    1;
+  required string state =       2;
+  required string httpAddress = 3;
+  required string rackName =    4;
+  required string labels =      5;
+  required string healthReport= 6;
+  required int64 lastUpdated =  7;
+  repeated NodeEntryInformationProto entries = 8;
+}
+
+message GetModelRequestProto {
+}
+
+message GetModelDesiredRequestProto {
+}
+
+message GetModelDesiredAppconfRequestProto {
+}
+
+message GetModelDesiredResourcesRequestProto {
+}
+
+message GetModelResolvedAppconfRequestProto {
+}
+
+message GetModelResolvedResourcesRequestProto {
+}
+
+message GetModelLiveResourcesRequestProto {
+}
+
+message GetLiveContainersRequestProto {
+}
+
+message GetLiveContainersResponseProto {
+  repeated string names = 1;
+  repeated ContainerInformationProto containers = 2;
+}
+
+message GetLiveContainerRequestProto {
+  required string containerId = 1;
+}
+
+
+message GetLiveComponentsRequestProto {
+}
+
+message GetLiveComponentsResponseProto {
+
+  repeated string names = 1;
+  repeated ComponentInformationProto components = 2;
+}
+
+message GetLiveComponentRequestProto {
+  required string name = 1;
+}
+
+message GetApplicationLivenessRequestProto {
+}
+
+message EmptyPayloadProto {
+}
+
+/**
+  Generic JSON, often containing data structures serialized as a string
+*/
+message WrappedJsonProto {
+  required string json = 1;
+}
+
+message GetCertificateStoreRequestProto {
+  optional string hostname =    1;
+  required string requesterId = 2;
+  required string password =    3;
+  required string type =        4;
+}
+
+message GetCertificateStoreResponseProto {
+  required bytes store = 1;
+}
+
+message GetLiveNodesRequestProto {
+}
+
+message GetLiveNodesResponseProto {
+  repeated NodeInformationProto nodes = 1;
+}
+
+message GetLiveNodeRequestProto {
+  required string name = 1;
+}
\ No newline at end of file
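
A minimal sketch of the generated Java API for these messages (assuming the
file has been compiled with protoc into org.apache.slider.api.proto.Messages
as the options above declare; the wrapper class name and the message text are
illustrative only):

import org.apache.slider.api.proto.Messages;

public class BuildStopRequest {
  public static void main(String[] args) throws Exception {
    // newBuilder()/build() come from the protoc-generated code;
    // 'message' is the only (required) field of StopClusterRequestProto.
    Messages.StopClusterRequestProto req =
        Messages.StopClusterRequestProto.newBuilder()
            .setMessage("shutting down for upgrade")   // illustrative text
            .build();
    // Round-trip through the wire format.
    byte[] wire = req.toByteArray();
    Messages.StopClusterRequestProto parsed =
        Messages.StopClusterRequestProto.parseFrom(wire);
    System.out.println(parsed.getMessage());
  }
}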

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
new file mode 100644
index 0000000..4221b1d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.slider.api.proto";
+option java_outer_classname = "SliderClusterAPI";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package org.apache.slider.api;
+
+/*
+
+Compiling
+
+Maven: How to do it as part of the build
+ mvn install -DskipTests -Pcompile-protobuf
+
+How to run protoc directly, so as to see its error messages
+
+protoc --java_out=src/main/java \
+ -Isrc/main/proto src/main/proto/SliderClusterMessages.proto \
+  src/main/proto/SliderClusterProtocol.proto
+
+Once happy: commit the changes
+
+*/
+
+//import "Security.proto";
+import "SliderClusterMessages.proto";
+
+
+/**
+ * Protocol used between the Slider client and the AM
+ */
+service SliderClusterProtocolPB {
+
+  /**
+   * Stop the cluster
+   */
+
+  rpc stopCluster(StopClusterRequestProto) 
+    returns(StopClusterResponseProto);
+    
+  /**
+   * Upgrade containers 
+   */
+  rpc upgradeContainers(UpgradeContainersRequestProto) 
+    returns(UpgradeContainersResponseProto);
+
+  /**
+   * Flex the cluster. 
+   */
+  rpc flexCluster(FlexClusterRequestProto) 
+    returns(FlexClusterResponseProto);
+
+  /**
+   * Get the current cluster status
+   */
+  rpc getJSONClusterStatus(GetJSONClusterStatusRequestProto)
+    returns(GetJSONClusterStatusResponseProto);
+      
+  /**
+   * Get the instance definition
+   */
+  rpc getInstanceDefinition(GetInstanceDefinitionRequestProto)
+   returns(GetInstanceDefinitionResponseProto);
+      
+  /**
+   * List all running nodes in a role
+   */
+  rpc listNodeUUIDsByRole(ListNodeUUIDsByRoleRequestProto)
+    returns(ListNodeUUIDsByRoleResponseProto);
+
+  /**
+   * Get the details on a node
+   */
+  rpc getNode(GetNodeRequestProto)
+    returns(GetNodeResponseProto);
+
+  /**
+   * Get the
+   * details on a list of nodes.
+   * Unknown nodes are not returned.
+   * <i>Important: the order of the results is undefined</i>
+   */
+  rpc getClusterNodes(GetClusterNodesRequestProto)
+    returns(GetClusterNodesResponseProto);
+    
+   /**
+    * echo some text
+    */
+   rpc echo(EchoRequestProto)
+     returns(EchoResponseProto); 
+
+   /**
+    * kill a container
+    */
+   rpc killContainer(KillContainerRequestProto)
+     returns(KillContainerResponseProto);
+      
+   /**
+    * kill the AM
+    */
+   rpc amSuicide(AMSuicideRequestProto)
+     returns(AMSuicideResponseProto);
+
+  /* ************************************************************************
+  
+  REST model and operations.
+  Below here are the operations and payloads designed to mimic
+  the REST API. That API is now the source of these
+  specifications; this is simply a derivative.
+  
+  **************************************************************************/
+
+  rpc getLivenessInformation(GetApplicationLivenessRequestProto) 
+    returns(ApplicationLivenessInformationProto);
+
+  rpc getLiveContainers(GetLiveContainersRequestProto) 
+    returns(GetLiveContainersResponseProto);
+
+  rpc getLiveContainer(GetLiveContainerRequestProto) 
+    returns(ContainerInformationProto);
+
+  rpc getLiveComponents(GetLiveComponentsRequestProto) 
+    returns(GetLiveComponentsResponseProto);
+
+  rpc getLiveComponent(GetLiveComponentRequestProto) 
+    returns(ComponentInformationProto);
+
+  rpc getLiveNodes(GetLiveNodesRequestProto)
+    returns(GetLiveNodesResponseProto);
+
+  rpc getLiveNode(GetLiveNodeRequestProto)
+    returns(NodeInformationProto);
+  
+  // AggregateConf getModelDesired()
+  rpc getModelDesired(EmptyPayloadProto)
+    returns(WrappedJsonProto);
+
+  // ConfTree getModelDesiredAppconf
+  rpc getModelDesiredAppconf(EmptyPayloadProto)
+    returns(WrappedJsonProto);
+
+  // ConfTree getModelDesiredResources
+  rpc getModelDesiredResources(EmptyPayloadProto)
+    returns(WrappedJsonProto);
+
+  // AggregateConf getModelResolved()
+  rpc getModelResolved(EmptyPayloadProto)
+    returns(WrappedJsonProto);
+
+  // ConfTree getModelResolvedAppconf
+  rpc getModelResolvedAppconf(EmptyPayloadProto)
+    returns(WrappedJsonProto);
+
+  // ConfTree getModelResolvedResources
+  rpc getModelResolvedResources(EmptyPayloadProto)
+    returns(WrappedJsonProto);
+
+  // ConfTree getLiveResources
+  rpc getLiveResources(EmptyPayloadProto)
+    returns(WrappedJsonProto);
+
+  rpc getClientCertificateStore(GetCertificateStoreRequestProto)
+    returns(GetCertificateStoreResponseProto);
+
+  
+  
+}
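
With java_generic_services = true, protoc also emits a blocking client
interface for this service. A sketch of a call through it (how the proxy is
obtained over Hadoop RPC is out of scope here; StopClusterCall is an
illustrative class name):

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.slider.api.proto.Messages;
import org.apache.slider.api.proto.SliderClusterAPI;

public class StopClusterCall {
  // SliderClusterAPI.SliderClusterProtocolPB.BlockingInterface is the
  // protoc-generated blocking stub interface for the service above.
  static void stop(SliderClusterAPI.SliderClusterProtocolPB.BlockingInterface proxy,
                   RpcController controller) throws ServiceException {
    Messages.StopClusterRequestProto req =
        Messages.StopClusterRequestProto.newBuilder()
            .setMessage("stop requested by client")   // illustrative text
            .build();
    proxy.stopCluster(controller, req);
  }
}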

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
new file mode 100644
index 0000000..9e67c15
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -0,0 +1,15 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+org.apache.slider.server.appmaster.rpc.SliderRPCSecurityInfo
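
This one-line file is a standard java.util.ServiceLoader registration: Hadoop
discovers SecurityInfo implementations by scanning every
META-INF/services/org.apache.hadoop.security.SecurityInfo on the classpath. A
sketch of that discovery mechanism (ListSecurityInfo is an illustrative class
name):

import java.util.ServiceLoader;
import org.apache.hadoop.security.SecurityInfo;

public class ListSecurityInfo {
  public static void main(String[] args) {
    // With this jar on the classpath, the iteration below should include
    // org.apache.slider.server.appmaster.rpc.SliderRPCSecurityInfo.
    for (SecurityInfo info : ServiceLoader.load(SecurityInfo.class)) {
      System.out.println(info.getClass().getName());
    }
  }
}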

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/log4j.properties
new file mode 100644
index 0000000..65a7ad0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/log4j.properties
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#  
+#       http://www.apache.org/licenses/LICENSE-2.0
+#  
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+# This is a log4j config for slider
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{3} (%F:%M(%L)) - %m%n
+
+log4j.appender.subprocess=org.apache.log4j.ConsoleAppender
+log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout
+log4j.appender.subprocess.layout.ConversionPattern=%c{1}: %m%n
+
+
+# at debug level this provides details on what is going on
+log4j.logger.org.apache.slider=DEBUG
+#log4j.logger.org.apache.slider.exec.RunLongLivedApp=ERROR
+
+log4j.logger.org.apache.hadoop.security=DEBUG
+log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG
+log4j.logger.org.apache.hadoop.yarn.service=DEBUG
+log4j.logger.org.apache.hadoop.yarn.client=DEBUG
+#crank back on some noise
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN
+log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN
+log4j.logger.org.apache.hadoop.yarn.client.RMProxy=WARN
+
+# for test runs we don't care about native code
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+# HDFS is noisy in tests
+log4j.logger.org.apache.hadoop.hdfs.server.datanode=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.namenode=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement=WARN
+log4j.logger.org.apache.hadoop.hdfs=WARN
+
+log4j.logger.org.apache.zookeeper=WARN

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/agent.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/agent.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/agent.txt
new file mode 100644
index 0000000..79c1972
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/agent.txt
@@ -0,0 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+This is the conf directory for the python agent
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command.json
new file mode 100644
index 0000000..197a046
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command.json
@@ -0,0 +1,168 @@
+{
+  "roleCommand": "START",
+  "clusterName": "c1",
+  "hostname": "c6402.ambari.apache.org",
+  "hostLevelParams": {
+    "java_home": "/usr/jdk64/jdk1.7.0_45"
+  },
+  "commandType": "EXECUTION_COMMAND",
+  "roleParams": {},
+  "serviceName": "HBASE",
+  "role": "HBASE_MASTER",
+  "commandParams": {},
+  "taskId": 24,
+  "public_hostname": "c6402.ambari.apache.org",
+  "configurations": {
+    "hbase-log4j": {
+      "log4j.threshold": "ALL",
+      "log4j.rootLogger": "${hbase.root.logger}",
+      "log4j.logger.org.apache.zookeeper": "INFO",
+      "log4j.logger.org.apache.hadoop.hbase": "DEBUG",
+      "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher": 
"INFO",
+      "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil": "INFO",
+      "log4j.category.SecurityLogger": "${hbase.security.logger}",
+      "log4j.appender.console": "org.apache.log4j.ConsoleAppender",
+      "log4j.appender.console.target": "System.err",
+      "log4j.appender.console.layout": "org.apache.log4j.PatternLayout",
+      "log4j.appender.console.layout.ConversionPattern": "%d{ISO8601} %-5p 
[%t] %c{2}: %m%n",
+      "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender",
+      "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout",
+      "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: 
%m%n",
+      "log4j.appender.RFAS.MaxFileSize": "${hbase.security.log.maxfilesize}",
+      "log4j.appender.RFAS.MaxBackupIndex": 
"${hbase.security.log.maxbackupindex}",
+      "log4j.appender.RFAS.File": 
"${hbase.log.dir}/${hbase.security.log.file}",
+      "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender",
+      "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout",
+      "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] 
%c{2}: %m%n",
+      "log4j.appender.RFA.MaxFileSize": "${hbase.log.maxfilesize}",
+      "log4j.appender.RFA.MaxBackupIndex": "${hbase.log.maxbackupindex}",
+      "log4j.appender.RFA.File": "${hbase.log.dir}/${hbase.log.file}",
+      "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender",
+      "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender",
+      "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout",
+      "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] 
%c{2}: %m%n",
+      "log4j.appender.DRFA.File": "${hbase.log.dir}/${hbase.log.file}",
+      "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd",
+      "log4j.additivity.SecurityLogger": "false",
+      "hbase.security.logger": "INFO,console",
+      "hbase.security.log.maxfilesize": "256MB",
+      "hbase.security.log.maxbackupindex": "20",
+      "hbase.security.log.file": "SecurityAuth.audit",
+      "hbase.root.logger": "INFO,console",
+      "hbase.log.maxfilesize": "256MB",
+      "hbase.log.maxbackupindex": "20",
+      "hbase.log.file": "hbase.log",
+      "hbase.log.dir": "."
+    },
+    "global": {
+      "hbase_root": "/share/hbase/hbase-0.96.1-hadoop2",
+      "hbase_pid_dir": "/var/run/hbase",
+      "proxyuser_group": "users",
+      "syncLimit": "5",
+      "hbase_regionserver_heapsize": "1024m",
+      "rca_enabled": "false",
+      "tickTime": "2000",
+      "hbase_master_heapsize": "1024m",
+      "initLimit": "10",
+      "user_group": "hadoop",
+      "hbase_user": "hbase",
+      "hbase_log_dir": "/var/log/hbase"
+    },
+    "hdfs-site": {
+      "dfs.namenode.checkpoint.period": "21600",
+      "dfs.namenode.avoid.write.stale.datanode": "true",
+      "dfs.namenode.checkpoint.txns": "1000000",
+      "dfs.block.access.token.enable": "true",
+      "dfs.support.append": "true",
+      "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+      "dfs.cluster.administrators": " hdfs",
+      "dfs.replication": "3",
+      "ambari.dfs.datanode.http.port": "50075",
+      "dfs.datanode.balance.bandwidthPerSec": "6250000",
+      "dfs.namenode.safemode.threshold-pct": "1.0f",
+      "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+      "dfs.permissions.enabled": "true",
+      "dfs.client.read.shortcircuit": "true",
+      "dfs.namenode.https-address": "c6402.ambari.apache.org:50470",
+      "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+      "dfs.blocksize": "134217728",
+      "dfs.datanode.max.transfer.threads": "1024",
+      "dfs.datanode.du.reserved": "1073741824",
+      "dfs.webhdfs.enabled": "true",
+      "dfs.namenode.handler.count": "100",
+      "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+      "fs.permissions.umask-mode": "022",
+      "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}",
+      "dfs.datanode.ipc.address": "0.0.0.0:8010",
+      "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+      "dfs.namenode.http-address": "c6402.ambari.apache.org:50070",
+      "dfs.blockreport.initialDelay": "120",
+      "dfs.datanode.failed.volumes.tolerated": "0",
+      "dfs.namenode.accesstime.precision": "0",
+      "ambari.dfs.datanode.port": "50010",
+      "dfs.namenode.avoid.read.stale.datanode": "true",
+      "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+      "dfs.namenode.stale.datanode.interval": "30000",
+      "dfs.heartbeat.interval": "3",
+      "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+      "dfs.permissions.superusergroup": "hdfs",
+      "dfs.https.port": "50470",
+      "dfs.journalnode.http-address": "0.0.0.0:8480",
+      "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+      "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+      "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+      "dfs.datanode.data.dir.perm": "750",
+      "dfs.namenode.name.dir.restore": "true",
+      "dfs.replication.max": "50",
+      "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+    },
+    "hbase-site": {
+      "hbase.hstore.flush.retries.number": "120",
+      "hbase.client.keyvalue.maxsize": "10485760",
+      "hbase.hstore.compactionThreshold": "3",
+      "hbase.rootdir": "hdfs://c6402.ambari.apache.org:8020/apps/hbase/data",
+      "hbase.stagingdir": 
"hdfs://c6402.ambari.apache.org:8020/apps/hbase/staging",
+      "hbase.regionserver.handler.count": "60",
+      "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+      "hbase.hregion.memstore.block.multiplier": "2",
+      "hbase.hregion.memstore.flush.size": "134217728",
+      "hbase.superuser": "hbase",
+      "hbase.zookeeper.property.clientPort": "2181",
+      "hbase.regionserver.global.memstore.upperLimit": "0.4",
+      "zookeeper.session.timeout": "30000",
+      "hbase.tmp.dir": "/hadoop/hbase",
+      "hbase.hregion.max.filesize": "10737418240",
+      "hfile.block.cache.size": "0.40",
+      "hbase.security.authentication": "simple",
+      "hbase.defaults.for.version.skip": "true",
+      "hbase.zookeeper.quorum": "c6402.ambari.apache.org",
+      "zookeeper.znode.parent": "/hbase-unsecure",
+      "hbase.hstore.blockingStoreFiles": "10",
+      "hbase.hregion.majorcompaction": "86400000",
+      "hbase.security.authorization": "false",
+      "hbase.cluster.distributed": "true",
+      "hbase.hregion.memstore.mslab.enabled": "true",
+      "hbase.client.scanner.caching": "100",
+      "hbase.zookeeper.useMulti": "true",
+      "hbase.regionserver.info.port": "0",
+      "hbase.master.info.port": "60010"
+    },
+    "core-site": {
+      "io.serializations": 
"org.apache.hadoop.io.serializer.WritableSerialization",
+      "gluster.daemon.user": "null",
+      "fs.trash.interval": "360",
+      "hadoop.security.authentication": "simple",
+      "io.compression.codecs": 
"org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+      "mapreduce.jobtracker.webinterface.trusted": "false",
+      "fs.AbstractFileSystem.glusterfs.impl": "null",
+      "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020",
+      "ipc.client.connect.max.retries": "50",
+      "ipc.client.idlethreshold": "8000",
+      "io.file.buffer.size": "131072",
+      "hadoop.security.authorization": "false",
+      "hadoop.security.auth_to_local": "\n        
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        
RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n  
      RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
+      "ipc.client.connection.maxidletime": "30000"
+    }
+  },
+  "commandId": "2-2"
+}
\ No newline at end of file
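
The document above mirrors an Ambari-style EXECUTION_COMMAND payload: per-site
configuration dictionaries nested under "configurations". A sketch of pulling
one property back out with Jackson (assuming Jackson is on the classpath;
ReadCommandJson and the local file location are illustrative):

import java.io.File;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ReadCommandJson {
  public static void main(String[] args) throws Exception {
    JsonNode root = new ObjectMapper().readTree(new File("command.json"));
    // Walk configurations -> hbase-site -> hbase.rootdir.
    String rootdir = root.path("configurations")
        .path("hbase-site")
        .path("hbase.rootdir")
        .asText();
    System.out.println("hbase.rootdir = " + rootdir);
  }
}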

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command_template.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command_template.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command_template.json
new file mode 100644
index 0000000..da06c13
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command_template.json
@@ -0,0 +1,168 @@
+{
+  "roleCommand": "{{COMMAND}}",
+  "clusterName": "{{CLUSTER_NAME}}",
+  "hostname": "{{HOST_NAME}}",
+  "hostLevelParams": {
+    "java_home": "/usr/jdk64/jdk1.7.0_45"
+  },
+  "commandType": "EXECUTION_COMMAND",
+  "roleParams": {},
+  "serviceName": "{{SERVICE_NAME}}",
+  "role": "{{ROLE_NAME}}",
+  "commandParams": {},
+  "taskId": "{{TASK_ID}}",
+  "public_hostname": "{{HOST_NAME}}",
+  "configurations": {
+    "hbase-log4j": {
+      "log4j.threshold": "ALL",
+      "log4j.rootLogger": "${hbase.root.logger}",
+      "log4j.logger.org.apache.zookeeper": "INFO",
+      "log4j.logger.org.apache.hadoop.hbase": "DEBUG",
+      "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher": 
"INFO",
+      "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil": "INFO",
+      "log4j.category.SecurityLogger": "${hbase.security.logger}",
+      "log4j.appender.console": "org.apache.log4j.ConsoleAppender",
+      "log4j.appender.console.target": "System.err",
+      "log4j.appender.console.layout": "org.apache.log4j.PatternLayout",
+      "log4j.appender.console.layout.ConversionPattern": "%d{ISO8601} %-5p 
[%t] %c{2}: %m%n",
+      "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender",
+      "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout",
+      "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: 
%m%n",
+      "log4j.appender.RFAS.MaxFileSize": "${hbase.security.log.maxfilesize}",
+      "log4j.appender.RFAS.MaxBackupIndex": 
"${hbase.security.log.maxbackupindex}",
+      "log4j.appender.RFAS.File": 
"${hbase.log.dir}/${hbase.security.log.file}",
+      "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender",
+      "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout",
+      "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] 
%c{2}: %m%n",
+      "log4j.appender.RFA.MaxFileSize": "${hbase.log.maxfilesize}",
+      "log4j.appender.RFA.MaxBackupIndex": "${hbase.log.maxbackupindex}",
+      "log4j.appender.RFA.File": "${hbase.log.dir}/${hbase.log.file}",
+      "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender",
+      "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender",
+      "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout",
+      "log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] 
%c{2}: %m%n",
+      "log4j.appender.DRFA.File": "${hbase.log.dir}/${hbase.log.file}",
+      "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd",
+      "log4j.additivity.SecurityLogger": "false",
+      "hbase.security.logger": "INFO,console",
+      "hbase.security.log.maxfilesize": "256MB",
+      "hbase.security.log.maxbackupindex": "20",
+      "hbase.security.log.file": "SecurityAuth.audit",
+      "hbase.root.logger": "INFO,console",
+      "hbase.log.maxfilesize": "256MB",
+      "hbase.log.maxbackupindex": "20",
+      "hbase.log.file": "hbase.log",
+      "hbase.log.dir": "."
+    },
+    "global": {
+      "hbase_root": "{{HBASE_HOME}}",
+      "hbase_pid_dir": "{{PID_DIR}}",
+      "proxyuser_group": "users",
+      "syncLimit": "5",
+      "hbase_regionserver_heapsize": "{{REGION_SERVER_HEAP_SIZE}}",
+      "rca_enabled": "false",
+      "tickTime": "2000",
+      "hbase_master_heapsize": "{{MASTER_HEAP_SIZE}}",
+      "initLimit": "10",
+      "user_group": "{{GROUP_NAME}}",
+      "hbase_user": "{{USER_NAME}}",
+      "hbase_log_dir": "{{LOG_DIR}}"
+    },
+    "hdfs-site": {
+      "dfs.namenode.checkpoint.period": "21600",
+      "dfs.namenode.avoid.write.stale.datanode": "true",
+      "dfs.namenode.checkpoint.txns": "1000000",
+      "dfs.block.access.token.enable": "true",
+      "dfs.support.append": "true",
+      "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
+      "dfs.cluster.administrators": " hdfs",
+      "dfs.replication": "3",
+      "ambari.dfs.datanode.http.port": "50075",
+      "dfs.datanode.balance.bandwidthPerSec": "6250000",
+      "dfs.namenode.safemode.threshold-pct": "1.0f",
+      "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
+      "dfs.permissions.enabled": "true",
+      "dfs.client.read.shortcircuit": "true",
+      "dfs.namenode.https-address": "{{NAMENODE_HTTPS_ADDRESS}}",
+      "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal",
+      "dfs.blocksize": "134217728",
+      "dfs.datanode.max.transfer.threads": "1024",
+      "dfs.datanode.du.reserved": "1073741824",
+      "dfs.webhdfs.enabled": "true",
+      "dfs.namenode.handler.count": "100",
+      "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
+      "fs.permissions.umask-mode": "022",
+      "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}",
+      "dfs.datanode.ipc.address": "0.0.0.0:8010",
+      "dfs.datanode.data.dir": "/hadoop/hdfs/data",
+      "dfs.namenode.http-address": "{{NAMENODE_HTTP_ADDRESS}}",
+      "dfs.blockreport.initialDelay": "120",
+      "dfs.datanode.failed.volumes.tolerated": "0",
+      "dfs.namenode.accesstime.precision": "0",
+      "ambari.dfs.datanode.port": "50010",
+      "dfs.namenode.avoid.read.stale.datanode": "true",
+      "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090",
+      "dfs.namenode.stale.datanode.interval": "30000",
+      "dfs.heartbeat.interval": "3",
+      "dfs.client.read.shortcircuit.streams.cache.size": "4096",
+      "dfs.permissions.superusergroup": "hdfs",
+      "dfs.https.port": "50470",
+      "dfs.journalnode.http-address": "0.0.0.0:8480",
+      "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
+      "dfs.namenode.write.stale.datanode.ratio": "1.0f",
+      "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude",
+      "dfs.datanode.data.dir.perm": "750",
+      "dfs.namenode.name.dir.restore": "true",
+      "dfs.replication.max": "50",
+      "dfs.namenode.name.dir": "/hadoop/hdfs/namenode"
+    },
+    "hbase-site": {
+      "hbase.hstore.flush.retries.number": "120",
+      "hbase.client.keyvalue.maxsize": "10485760",
+      "hbase.hstore.compactionThreshold": "3",
+      "hbase.rootdir": "{{HBASE_ROOT_DIR}}",
+      "hbase.stagingdir": "{{HBASE_STAGING_DIR}}",
+      "hbase.regionserver.handler.count": "60",
+      "hbase.regionserver.global.memstore.lowerLimit": "0.38",
+      "hbase.hregion.memstore.block.multiplier": "2",
+      "hbase.hregion.memstore.flush.size": "134217728",
+      "hbase.superuser": "{{HBASE_SUPERUSER}}",
+      "hbase.zookeeper.property.clientPort": "{{ZK_CLIENT_PORT}}",
+      "hbase.regionserver.global.memstore.upperLimit": "0.4",
+      "zookeeper.session.timeout": "30000",
+      "hbase.tmp.dir": "/hadoop/hbase",
+      "hbase.hregion.max.filesize": "10737418240",
+      "hfile.block.cache.size": "0.40",
+      "hbase.security.authentication": "simple",
+      "hbase.defaults.for.version.skip": "true",
+      "hbase.zookeeper.quorum": "{{ZK_HOSTS}}",
+      "zookeeper.znode.parent": "{{ZK_NODE_PARENT}}",
+      "hbase.hstore.blockingStoreFiles": "10",
+      "hbase.hregion.majorcompaction": "86400000",
+      "hbase.security.authorization": "false",
+      "hbase.cluster.distributed": "true",
+      "hbase.hregion.memstore.mslab.enabled": "true",
+      "hbase.client.scanner.caching": "100",
+      "hbase.zookeeper.useMulti": "true",
+      "hbase.regionserver.info.port": "{{REGION_SERVER_INFO_PORT}}",
+      "hbase.master.info.port": "{{MASTER_INFO_PORT}}"
+    },
+    "core-site": {
+      "io.serializations": 
"org.apache.hadoop.io.serializer.WritableSerialization",
+      "gluster.daemon.user": "null",
+      "fs.trash.interval": "360",
+      "hadoop.security.authentication": "simple",
+      "io.compression.codecs": 
"org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec",
+      "mapreduce.jobtracker.webinterface.trusted": "false",
+      "fs.AbstractFileSystem.glusterfs.impl": "null",
+      "fs.defaultFS": "{{DEFAULT_FS}}",
+      "ipc.client.connect.max.retries": "50",
+      "ipc.client.idlethreshold": "8000",
+      "io.file.buffer.size": "131072",
+      "hadoop.security.authorization": "false",
+      "hadoop.security.auth_to_local": "\n        
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        
RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n  
      RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT",
+      "ipc.client.connection.maxidletime": "30000"
+    }
+  },
+  "commandId": "{{COMMAND_ID}}"
+}
\ No newline at end of file
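
The template is the same payload with {{NAME}} placeholders where
deployment-specific values go. A sketch of the kind of substitution a caller
could apply (FillTemplate and the regex-based approach are illustrative, not
necessarily how Slider itself fills the template):

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FillTemplate {
  private static final Pattern TOKEN = Pattern.compile("\\{\\{([A-Z_]+)\\}\\}");

  // Replace each {{NAME}} token with its value; unknown tokens stay in place.
  static String fill(String template, Map<String, String> values) {
    Matcher m = TOKEN.matcher(template);
    StringBuffer out = new StringBuffer();
    while (m.find()) {
      String replacement = values.getOrDefault(m.group(1), m.group(0));
      m.appendReplacement(out, Matcher.quoteReplacement(replacement));
    }
    m.appendTail(out);
    return out.toString();
  }

  public static void main(String[] args) {
    Map<String, String> values = new HashMap<>();
    values.put("CLUSTER_NAME", "c1");
    System.out.println(fill("\"clusterName\": \"{{CLUSTER_NAME}}\"", values));
  }
}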

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/role-node.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/role-node.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/role-node.xml
new file mode 100644
index 0000000..aff1e05
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/role-node.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+  <!--
+  Role options for an agent-managed node
+  -->
+<configuration>
+  <property>
+    <name>role.name</name>
+    <value>node</value>
+  </property>
+  
+  <property>
+    <name>role.instances</name>
+    <value>1</value>
+  </property>
+    
+  <property>
+    <name>role.priority</name>
+    <value>1</value>
+  </property>
+      
+  <property>
+    <name>role.placement.policy</name>
+    <value>2</value>
+  </property>
+  
+  <property>
+    <name>yarn.memory</name>
+    <value>256</value>
+  </property>
+  
+  <property>
+    <name>yarn.vcores</name>
+    <value>1</value>
+  </property>
+  
+  <property>
+    <name>jvm.heapsize</name>
+    <value>256M</value>
+  </property>
+  
+  <property>
+    <name>env.MALLOC_ARENA_MAX</name>
+    <value>4</value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/dynamic/application.properties
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/dynamic/application.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/dynamic/application.properties
new file mode 100644
index 0000000..d9b42de
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/dynamic/application.properties
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#  
+#       http://www.apache.org/licenses/LICENSE-2.0
+#  
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License.
+#
+
+# gets updated at build time
+application.name=${pom.name}
+application.version=${pom.version}
+application.build=${buildNumber}
+application.build.java.version=${java.version}
+application.build.user=${user.name}
+application.build.info=${pom.name}-${pom.version} Built against commit# ${buildNumber} on Java ${java.version} by ${user.name}
+hadoop.build.info=${hadoop.version}
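
The ${...} tokens are filled in by Maven resource filtering at build time, so
the packaged copy of the file carries concrete values. A sketch of reading
them back at runtime (ShowBuildInfo is an illustrative class name; the
resource path is the one this file is packaged under):

import java.io.InputStream;
import java.util.Properties;

public class ShowBuildInfo {
  public static void main(String[] args) throws Exception {
    try (InputStream in = ShowBuildInfo.class.getResourceAsStream(
        "/org/apache/slider/providers/dynamic/application.properties")) {
      Properties props = new Properties();
      props.load(in);
      System.out.println(props.getProperty("application.build.info"));
    }
  }
}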

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
new file mode 100644
index 0000000..81239a2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/appconf.json
@@ -0,0 +1,19 @@
+{
+  "schema": "http://example.org/specification/v2.0.0";,
+
+  "metadata": {
+
+
+  },
+
+  "global": {
+    "env.MALLOC_ARENA_MAX": "4"
+  },
+
+  "components": {
+    "slider-appmaster" : {
+      "jvm.heapsize": "256M"
+    }
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/internal.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/internal.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/internal.json
new file mode 100644
index 0000000..2367d8f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/internal.json
@@ -0,0 +1,17 @@
+{
+  "schema": "http://example.org/specification/v2.0.0";,
+
+  "metadata": {
+  },
+
+  "global": {
+    "internal.container.failure.shortlife": "60000",
+    "internal.container.failure.threshold": "5",
+    "slider.cluster.directory.permissions": "0770",
+    "slider.data.directory.permissions": "0770"
+  },
+
+  "components": {
+
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/resources.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/resources.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/resources.json
new file mode 100644
index 0000000..478ab7e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/slideram/instance/resources.json
@@ -0,0 +1,18 @@
+{
+  "schema": "http://example.org/specification/v2.0.0";,
+
+  "metadata": {
+ 
+  },
+
+  "global": {
+  },
+
+  "components": {
+    "slider-appmaster": {
+      "yarn.component.instances": "1",
+      "yarn.vcores": "1",
+      "yarn.memory": "1024"
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
new file mode 100644
index 0000000..37ac65c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/slider.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+<configuration>
+  <property>
+    <name>slider.config.loaded</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>slider.provider.agent</name>
+    <value>org.apache.slider.providers.agent.AgentProviderFactory</value>
+  </property>
+</configuration>
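
A sketch of how the provider class registered here can be resolved through the
Hadoop Configuration API (ResolveAgentProvider is an illustrative class name;
loading slider.xml from the classpath is an assumption about how the file is
consumed):

import org.apache.hadoop.conf.Configuration;

public class ResolveAgentProvider {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.addResource("org/apache/slider/slider.xml");
    // getClass(name, default) resolves the property value as a class name.
    Class<?> factory = conf.getClass("slider.provider.agent", null);
    System.out.println(factory); // AgentProviderFactory when found
  }
}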

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/scripts/slider_keytabs.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/scripts/slider_keytabs.sh b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/scripts/slider_keytabs.sh
new file mode 100644
index 0000000..f0a8fc2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/scripts/slider_keytabs.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script exists to create the keytab set for a node on the cluster
+# including hbase and ZK alongside the YARN cores.
+
+# usage
+# keytabs <realm> <hostname>
+# validate the args
+
+num_vars=$#
+if [[ $num_vars -lt 2 ]]
+then
+  echo "Usage: $0 <realm> <hostname>"
+  exit 2
+fi
+
+realm="$1"
+hostname="$2"
+dest="."
+
+kadmin=kadmin.local
+
+${kadmin} <<EOF
+addprinc -randkey hdfs/${hostname}@${realm}
+addprinc -randkey yarn/${hostname}@${realm}
+addprinc -randkey HTTP/${hostname}@${realm}
+addprinc -randkey hbase/${hostname}@${realm}
+addprinc -randkey zookeeper/${hostname}@${realm}
+
+ktadd -norandkey -k ${dest}/hdfs.keytab  \
+  hdfs/${hostname}@${realm} \
+  HTTP/${hostname}@${realm}
+
+ktadd -norandkey -k ${dest}/yarn.keytab  \
+  yarn/${hostname}@${realm} \
+  HTTP/${hostname}@${realm}
+
+ktadd -norandkey -k ${dest}/hbase.keytab  \
+  hbase/${hostname}@${realm} 
+
+ktadd -norandkey -k ${dest}/zookeeper.keytab  \
+  zookeeper/${hostname}@${realm} 
+EOF
+
+exitcode=$?
+if [[ $exitcode -ne 0 ]]
+then
+  echo "keytab generation from ${kadmin} failed with exit code $exitcode"
+  exit $exitcode
+else
+  echo "keytab files for ${hostname}@${realm} created"
+fi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/scripts/yarnservice.py
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/scripts/yarnservice.py b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/scripts/yarnservice.py
new file mode 100644
index 0000000..1208c28
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/scripts/yarnservice.py
@@ -0,0 +1,383 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Launches a yarn service
+
+WORK IN PROGRESS, IGNORE
+
+This is a work-in-progress project to build a new launcher script for
+any Hadoop service.
+A key feature here is that the configs are defined in JSON files -
+files that are read in the order passed down, and merged into each other.
+
+The final merged file is used to define the java command to execute
+-and the Hadoop XML files.
+
+
+It uses a JSON config file 
+  --jfile configuration file (JSON format)
+  -class classname
+  -Dname=value -arbitrary value to pass down to the JVM
+  --java: any JVM arg
+  -javaX: javaX value
+
+
+ after an -- , all following commands are passed straight down to the invoked 
process.
+  # -xJ name=value JVM options. No: this is just another param
+  -xF file  file to load next. Files are loaded in order. 
+  -xD name=value again, values are loaded in order
+  -xU undefine
+  -xX main class, 'eXecute'
+
+  --  end of arguments
+  
+
+"""
+
+import sys
+# see : http://simplejson.readthedocs.org/en/latest/
+# and install w/ easy_install simplejson
+import simplejson
+
+KEY_JFILE = "-xF"
+KEY_DEF = "-xD"
+KEY_UNDEF = "-xU"
+KEY_EXEC = "-xX"
+KEY_ARGS = "--"
+
+COMMANDS = [KEY_JFILE, KEY_DEF, KEY_EXEC]
+
+#
+
+def debug(string) :
+  print string
+
+
+def pop_required_arg(arglist, previousArg) :
+  """
+  Pop the first element off the list and return it.
+  If the list is empty, raise an exception about a missing argument
+  after previousArg.
+  """
+  if not len(arglist) :
+    raise Exception, "Missing required parameter after %s" % previousArg
+  head = arglist[0]
+  del arglist[0]
+  return head
+
+
+def parse_one_jfile(filename) :
+  """
+  read in the given config file
+  """
+  parsed = simplejson.load(open(filename, "r"))
+  return parsed
+
+# hand down sys.argv:
+def extract_jfiles(args) :
+  """ takes a list of arg strings and separates them into jfile references
+  and other arguments.
+  """
+  l = len(args)
+  stripped = []
+  jfiles = []
+  index = 0
+  while index < l :
+    elt = args[index]
+    index += 1
+    if KEY_JFILE == elt :
+      # a match
+      if index == l :
+        #overshoot
+        raise Exception("Missing filename after " + KEY_JFILE)
+      filename = args[index]
+      debug("jfile " + filename)
+      jfiles.append(filename)
+      index += 1
+    else :
+      stripped.append(elt)
+  return jfiles, stripped
+
+
+def extract_args(args) :
+  """
+  Take a list of args, parse them or fail, generating a dictionary of actions
+  Return: dictionary and all leftover arguments
+  """
+  jfiles = []
+  execs = []
+  defs = []
+  remainder = []
+  while len(args) :
+    # the next call cannot fail, because of the len(args)
+    arg = pop_required_arg(args, "")
+    if KEY_JFILE == arg :
+      jfiles.append(pop_required_arg(args, KEY_JFILE))
+    elif KEY_DEF == arg :
+      defs.append((KEY_DEF, pop_required_arg(args, KEY_DEF)))
+    elif KEY_UNDEF == arg :
+      defs.append((KEY_UNDEF, pop_required_arg(args, KEY_UNDEF)))
+    elif KEY_EXEC == arg :
+      execs.append(pop_required_arg(args, KEY_EXEC))
+    elif KEY_ARGS == arg :
+      remainder += args
+      args = []
+    else :
+      remainder.append(arg)
+  # build the action list
+  actions = {
+    KEY_JFILE : jfiles,
+    KEY_EXEC : execs,
+    KEY_DEF : defs,
+    KEY_ARGS : remainder
+  }
+  #end of the run, there's a dictionary and a list of unparsed values
+  return actions
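+# For example (hypothetical argv):
+#   extract_args(["-xF", "a.json", "-xD", "k=v", "--", "rest"])
+# returns
+#   {"-xF": ["a.json"], "-xX": [], "-xD": [("-xD", "k=v")], "--": ["rest"]}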
+
+
+def get(conf, key, defVal) :
+  return conf.get(key, defVal)
+
+
+def merge_json(conf, json) :
+  """ merge in a json dict with the existing one
+  in: configuration dict, json dict
+  out: configuration'
+  """
+  for (key, val) in json.items() :
+    if key in conf :
+      #there's a match, do a more detailed merge
+      oldval = conf[key]
+      if type(oldval) == dict and type(val) == dict :
+      # two dictionary instances -merge
+        merge_json(oldval, val)
+      else :
+        conf[key] = val
+    else :
+      conf[key] = val
+  return conf
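+# For example, merging {"a": {"x": 2}, "c": 3} into
+# {"a": {"x": 1, "y": 1}, "b": 2} yields
+# {"a": {"x": 2, "y": 1}, "b": 2, "c": 3}: nested dicts are merged
+# recursively; any other value is overwritten by the newer file.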
+
+
+def merge_jfile(conf, filename) :
+  json = parse_one_jfile(filename)
+  return merge_json(conf, json)
+
+
+def merge_jfile_list(conf, jfiles) :
+  """ merge a list of jfiles on top of a conf dict
+  """
+  for jfile in jfiles :
+    conf = merge_jfile(conf, jfile)
+  return conf
+
+
+def split_to_keyval_tuple(param) :
+  """
+  Split a key=value string into the (key,value) tuple
+  * an exception is raised on any string "=value"
+  * if there is no string: exception.
+  * a key only definition maps to (key, None)
+  * a "key=" definition maps to (key, "")
+  """
+  if not len(param) :
+    raise Exception, "Empty string cannot be a key=value definition"
+  equalsPos = param.find("=")
+  if equalsPos < 0 :
+    return param, None
+  elif not equalsPos :
+    raise Exception, "no key in argument %s" % param
+  else :
+    key = param[:equalsPos]
+    value = param[(equalsPos + 1) :]
+    return key, value
+
+
+def recursive_define(conf, path, value) :
+  if not len(path) :
+    #fallen off the end of the world
+    return
+  entry = path[0]
+  if len(path) == 1 :
+    #end of list, apply it.
+    conf[entry] = value
+  else :
+    #there are more path elements below, so a sub-dict is needed here.
+    if conf.has_key(entry) and type(conf[entry]) == dict :
+      #it's a subdir, simple: recurse.
+      recursive_define(conf[entry], path[1 :], value)
+    else :
+      #either the entry isn't a dict, or it's not there. Same outcome:
+      #replace it with a fresh sub-dict and recurse.
+      subconf = {}
+      conf[entry] = subconf
+      recursive_define(subconf, path[1 :], value)
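+# For example, recursive_define(conf, ["site", "fs.defaultFS"], "hdfs://nn")
+# turns an empty conf into {"site": {"fs.defaultFS": "hdfs://nn"}}, creating
+# intermediate dicts as needed.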
+
+def recursive_undef(conf, path) :
+  if not len(path) :
+    #fallen off the end of the world
+    return
+  entry = path[0]
+  if len(path) == 1 :
+    #end of list: remove the entry if present.
+    if conf.has_key(entry) :
+      del conf[entry]
+  else :
+    #there are more path elements below, so a sub-dict is expected here.
+    if conf.has_key(entry) and type(conf[entry]) == dict :
+      #it's a subdir, simple: recurse.
+      recursive_undef(conf[entry], path[1 :])
+    else :
+      #either the entry isn't a dict, or it's not there; nothing to undefine.
+      pass
+
+def apply_action(conf, action, key, value) :
+  """
+  Apply either a def or undef action, splitting the key into a path
+  and running through it.
+  """
+  keypath = key.split("/")
+  #now have a split key,
+  if KEY_DEF == action :
+    recursive_define(conf, keypath, value)
+  elif KEY_UNDEF == action :
+    recursive_undef(conf, keypath)
+
+
+def apply_local_definitions(conf, definitions) :
+  """
+  Run through the definition actions and apply them one by one
+  """
+  for defn in definitions :
+    (action, param) = defn
+    if KEY_DEF == action :
+      # split into key=value; a bare key maps to (key, None)
+      (key, val) = split_to_keyval_tuple(param)
+      apply_action(conf, KEY_DEF, key, val)
+    elif KEY_UNDEF == action :
+      apply_action(conf, KEY_UNDEF, param, None)
+  return conf
+
+
+#def parse_args(conf, args) :
+#  """
+#   split an arg string, parse the jfiles & merge over the conf
+#  (configuration, args[]) -> (conf', stripped, jfiles[])
+#  """
+#  (jfiles, stripped) = extract_jfiles(args)
+#
+#  actions = extract_args(args)
+#  jfiles = actions[KEY_JFILE]
+#  conf = merge_jfile_list(conf, jfiles)
+#  return conf, actions
+
+
+def print_conf(conf) :
+  """ dump the configuration to the console
+  """
+  print "{"
+  for (key, val) in conf.items() :
+    if type(val) == dict :
+      print key
+      print_conf(val)
+    else :
+      print "" + key + " => " + str(val)
+  print "}"
+
+
+def list_to_str(l, spacer) :
+  return spacer.join(l)
+
+
+def list_to_hxml_str(l) :
+  return list_to_str(l, ",")
+
+
+def export_kv_xml(output, key, value) :
+  line = "<property><name>" + key + "</name><value>" + str(value) \
+         + "</value></property>\n"
+  print line
+  output.write(line)
+
+
+def export_to_hadoop_xml(output, conf) :
+  """ export the conf to hadoop XML
+  dictionaries are skipped.
+  """
+  output.write("<configuration>\n")
+  for (key, value) in conf.items() :
+    if type(value) is list :
+      # flatten lists into comma-separated values
+      export_kv_xml(output, key, list_to_hxml_str(value))
+    elif type(value) is dict :
+      print "skipping dict " + key
+    else :
+      export_kv_xml(output, key, value)
+  output.write("</configuration>\n")
+
+
+def start(conf, stripped_args) :
+  """
+  start the process by grabbing exec/args for the arguments
+  """
+  ex = conf["exec"]
+  args = []
+  jsonargs = get(ex, "args", [])
+  args.extend(jsonargs)
+  args.extend(stripped_args)
+  classname = get(ex, "classname", "")
+  if not len(classname) :
+    raise Exception, "No classname supplied"
+  commandline = ["java"]
+  classpath = []
+  jvmargs = []
+  commandline.extend(jvmargs)
+  commandline.append("-classpath")
+  commandline.append(list_to_str(classpath, ":"))
+  commandline.append("org.apache.hadoop.yarn.service.launcher.ServiceLauncher")
+  commandline.append(classname)
+  commandline.extend(args)
+  print "ready to exec : %s" % commandline
+
+
+def main() :
+#  (conf, stripped, jfiles) = parse_args({}, sys.argv[1 :])
+  actions = extract_args(sys.argv[1 :])
+  jfiles = actions[KEY_JFILE]
+  conf = merge_jfile_list({}, jfiles)
+  apply_local_definitions(conf, actions[KEY_DEF])
+  exec_args = actions[KEY_ARGS]
+
+  print_conf(conf)
+  #  if len(stripped) > 0 :
+  #got an output file
+  #    filename = stripped[0]
+  #    print "Writing XML configuration to " + filename
+  #    output = open(filename, "w")
+  #    export_to_hadoop_xml(output, conf["site"])
+  start(conf, exec_args)
+
+
+if __name__ == "__main__" :
+  main()
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/site/site.xml
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/site/site.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/site/site.xml
new file mode 100644
index 0000000..3b5df7a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/site/site.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~  or more contributor license agreements.  See the NOTICE file
+  ~  distributed with this work for additional information
+  ~  regarding copyright ownership.  The ASF licenses this file
+  ~  to you under the Apache License, Version 2.0 (the
+  ~  "License"); you may not use this file except in compliance
+  ~  with the License.  You may obtain a copy of the License at
+  ~
+  ~       http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~  Unless required by applicable law or agreed to in writing, software
+  ~  distributed under the License is distributed on an "AS IS" BASIS,
+  ~  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~  See the License for the specific language governing permissions and
+  ~  limitations under the License.
+  -->
+
+<project name="Slider">
+
+  <version position="right"/>
+  <body>
+    <menu ref="reports"/>
+  </body>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
----------------------------------------------------------------------
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
new file mode 100644
index 0000000..ad374f8d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/pom.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0";
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+    <parent>
+        <artifactId>hadoop-yarn</artifactId>
+        <groupId>org.apache.hadoop</groupId>
+        <version>3.0.0-alpha2-SNAPSHOT</version>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-yarn-slider</artifactId>
+    <name>Apache Hadoop YARN Slider</name>
+    <packaging>pom</packaging>
+
+    <properties>
+        <hadoop.common.build.dir>${basedir}/../../../../hadoop-common-project/hadoop-common/target</hadoop.common.build.dir>
+    </properties>
+
+    <!-- Do not add dependencies here; add them to the POM of the leaf module -->
+
+    <modules>
+        <module>hadoop-yarn-slider-core</module>
+    </modules>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31c4a419/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
index 233a353..493e03a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml
@@ -36,6 +36,8 @@
   <modules>
     <module>hadoop-yarn-applications-distributedshell</module>
     <module>hadoop-yarn-applications-unmanaged-am-launcher</module>
+    <module>hadoop-yarn-slider</module>
   </modules>
 
  <profiles>

