Repository: incubator-myriad
Updated Branches:
  refs/heads/master 519d9b421 -> bcd1fc8c6


Added Docker support; see the README.md in the docker directory for a quick
start guide.

Code changes:
- Added classes necessary to pass configuration to TaskFactory
- Modified classes necessary to consume the configuration changes
- Edits to docker/scripts mostly to break up the build

@Todo
- add support for MesosInfo (for AppC support), will be done as more 
information about the MesosInfo object appears.
- add configuration Uri to avoid rebuilding images (done, separate PR).
- clean up docker scripts more, including the ability to pass build arguments, 
will be incrementally done.

JIRA:
  [Myriad-136] https://issues.apache.org/jira/browse/MYRIAD-136
Pull Request:
  Closes #67
Author:
  DarinJ <dar...@apache.org>

Code changes:
- Added classes necessary to pass configuration to TaskFactory
- Modified classes necessary to consume the configuration changes
- Edits to docker/scripts mostly to break up the build

@Todo
- add support for MesosInfo (for AppC support), will be done as more 
information about the MesosInfo object appears.
- add configuration Uri to avoid rebuilding images (done, separate PR).
- clean up docker scripts more, including the ability to pass build arguments, 
will be incrementally done.


Project: http://git-wip-us.apache.org/repos/asf/incubator-myriad/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-myriad/commit/bcd1fc8c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-myriad/tree/bcd1fc8c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-myriad/diff/bcd1fc8c

Branch: refs/heads/master
Commit: bcd1fc8c6f6ea184522bc9a2c58610710a4be9b3
Parents: 519d9b4
Author: darinj <darinj.w...@gmail.com>
Authored: Mon May 2 15:08:20 2016 -0400
Committer: darinj <dar...@apache.org>
Committed: Wed May 11 21:18:36 2016 -0400

----------------------------------------------------------------------
 build.gradle                                    |   8 +
 docker/Dockerfile                               |  21 ++-
 docker/README.md                                |  29 ++--
 docker/build-myriad.sh                          |   3 +-
 docker/build.gradle                             |  18 ++-
 docker/myriad-bin/configure-yarn.sh             |  60 +++++++
 docker/myriad-bin/create-user.sh                |  34 ----
 docker/myriad-bin/install-yarn.sh               |  54 ++-----
 docker/myriad-etc/mapred-site.xml.template      |  29 ++++
 .../myriad-config-default.yml.template          | 113 +++++++++++++
 docker/myriad-etc/yarn-site.xml.template        | 162 +++++++++++++++++++
 myriad-scheduler/build.gradle                   |   1 -
 .../configuration/MyriadConfiguration.java      |  30 +++-
 .../MyriadContainerConfiguration.java           |  54 +++++++
 .../MyriadDockerConfiguration.java              |  72 +++++++++
 .../scheduler/ServiceTaskFactoryImpl.java       |   3 +
 .../apache/myriad/scheduler/TaskFactory.java    |  11 +-
 .../org/apache/myriad/scheduler/TaskUtils.java  |  95 ++++++++++-
 .../apache/myriad/scheduler/TestTaskUtils.java  |  94 ++++++++++-
 ...iad-config-test-default-with-docker-info.yml |  90 +++++++++++
 ...-config-test-default-with-framework-role.yml |  70 ++++++++
 21 files changed, 934 insertions(+), 117 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/build.gradle
----------------------------------------------------------------------
diff --git a/build.gradle b/build.gradle
index 54e3eac..ac416ec 100644
--- a/build.gradle
+++ b/build.gradle
@@ -77,6 +77,13 @@ subprojects {
 
     configurations.create('myriadExecutorConf')
 
+    //Keeping guava at the level of hadoop 2.6.0 so we don't get all the
+    //awesomeness in our IDE and fail at runtime.
+    configurations.all {
+        resolutionStrategy {
+            force 'com.google.guava:guava:11.0.2'
+        }
+    }
     configurations {
         provided
         capsule
@@ -97,6 +104,7 @@ subprojects {
         compile 'com.google.code.gson:gson:2.3.1'        // marshalling 
between the scheduler and executor
 
         testCompile 'org.apache.zookeeper:zookeeper:3.4.6' // to resolve 
temporary mavenlocal issue
+        runtime 'com.google.guava:guava:11.0.2'
         testCompile 'junit:junit:4.12'
         testCompile 'commons-collections:commons-collections:3.2.1'
     }

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 52ad7f3..4f344a6 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -24,6 +24,10 @@ MAINTAINER Apache Myriad d...@myriad.incubator.apache.org
 
 ENV HADOOP_USER="yarn"
 ENV HADOOP_VER="2.7.0"
+ENV HADOOP_HOME=/usr/local/hadoop
+ENV JAVA_HOME=/usr
+ENV MESOS_NATIVE_LIBRARY=/usr/local/lib/libmesos.so
+
 
 # Setup mesosphere repositories
 RUN apt-get -y update
@@ -32,24 +36,17 @@ RUN apt-key adv --keyserver keyserver.ubuntu.com --recv 
E56151BF
 RUN DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]') 
CODENAME=$(lsb_release -cs) && echo "deb http://repos.mesosphere.com/${DISTRO} 
${CODENAME} main" | tee /etc/apt/sources.list.d/mesosphere.list
 RUN apt-get -y update
 
-ADD myriad-bin/create-user.sh /create-user.sh
-RUN sh /create-user.sh
-
 # Install Mesos
 RUN apt-get install -y mesos curl tar
-
 # Run local YARN install
 ADD myriad-bin/install-yarn.sh /install-yarn.sh
 RUN sh /install-yarn.sh
-
+ADD myriad-bin/configure-yarn.sh /configure-yarn.sh
+RUN /configure-yarn.sh
 # Copy over myriad libraries
 ADD ./libs/* /usr/local/hadoop/share/hadoop/yarn/lib/
-
-
 # Initialize hadoop confs with env vars
-ADD myriad-bin/run-myriad.sh /run-myriad.sh
-RUN mkdir /myriad-conf/
-
-# Run the YARN resource manager
+ADD myriad-etc/* /usr/local/hadoop/etc/hadoop/
+RUN chown -R root:root /usr/local/hadoop/etc
 USER yarn
-CMD sh /run-myriad.sh 
+CMD /usr/local/hadoop/bin/yarn resourcemanager

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/README.md
----------------------------------------------------------------------
diff --git a/docker/README.md b/docker/README.md
index 7ce77ac..1650691 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -2,25 +2,36 @@
 Docker is the easiest way to from 0 to Yarn on Mesos within minutes. 
 
 ## Building the Resource Manager Docker
-Run the following command:
-```./gradlew buildRMDocker```
-This will build the ResourceManager from src and save the image as 
*mesos/myriad*.
 
 #Configuration Guide#
 
-In order for the ResourceManager to operate correctly, you will need to 
provide 2 configuration files. These files will need to mounted from a 
directory into */myriad-conf* within the docker container.
+In order for the ResourceManager to operate correctly, you will need to 
provide 5 configuration files. These files will 
+need to be mounted from a directory into `./myriad-etc` of this folder.
 
-* 
[myriad-config-default.yml](https://github.com/mesos/myriad/blob/phase1/myriad-scheduler/src/main/resources/myriad-config-default.yml)
-* modified 
[yarn-site.xml](https://github.com/mesos/myriad/blob/phase1/docs/myriad-dev.md)
+* 
[myriad-config-default.yml](https://github.com/mesos/myriad/blob/phase1/myriad-scheduler/src/main/resources/myriad-config-default.yml)
 (template provided)
+* 
[yarn-site.xml](https://github.com/mesos/myriad/blob/phase1/docs/myriad-dev.md) 
(template provided)
+* mapred-site.xml (template provided)
+* hdfs-site.xml (used for hdfs)
+* core-site.xml (used for hdfs)
 
+It is assumed you already have hdfs and Mesos running.  For more information 
about Apache Mesos visit the [website](http://mesos.apache.org). 
+If you need to set up hdfs, consider using the [hdfs-mesos 
framework](https://github.com/mesosphere/hdfs).  Copy the hdfs-site.xml and 
+core-site.xml from your hdfs configuration into myriad-etc.  Additionally, files 
may be necessary such as rack.sh, log4j.properties, 
+hadoop-env.sh, and yarn-env.sh depending on your configuration.
+
+Run the following commands:
+```bash
+./gradlew -PdockerTag=username/myriad buildRMDocker
+docker push username/myriad
+```
+This will build the ResourceManager from src, save, and push the image as 
*username/myriad*.
 
 ## Running the Resource Manager Docker
 
 ```bash
 docker run --net=host --name='myriad-resourcemanager' -t \
-  -v /path/to/configs:/myriad-conf \
-  mesos/myriad
-  ```
+  <username>/myriad
+```
 
 #Environment Variables#
 * *ALTERNATE_HADOOP_URL* : Optional - Allows user to override the hadoop 
distribution used by Myriad. This will download the *.tar.gz file to be used as 
the hadoop distribution of choice for Myriad. 

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/build-myriad.sh
----------------------------------------------------------------------
diff --git a/docker/build-myriad.sh b/docker/build-myriad.sh
index d3b0e74..b0638d0 100644
--- a/docker/build-myriad.sh
+++ b/docker/build-myriad.sh
@@ -36,6 +36,7 @@ if [ ! -d "libs" ]; then
 fi
 
 # Copy over the Java Libraries
-cp -rf ../myriad-scheduler/build/libs/* libs/
 
+cp -rf ../myriad-scheduler/build/libs/* libs/
+cp -rf ../myriad-executor/build/libs/* libs/
 echo "Init complete! " #Modify config/myriad-default-config.yml to your liking 
before building the docker image"

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/build.gradle
----------------------------------------------------------------------
diff --git a/docker/build.gradle b/docker/build.gradle
index 4f35494..38f61e6 100644
--- a/docker/build.gradle
+++ b/docker/build.gradle
@@ -23,15 +23,25 @@ import com.bmuschko.gradle.docker.tasks.image.Dockerfile
 import com.bmuschko.gradle.docker.tasks.image.DockerBuildImage
 
 task copySchedulerJars(type:Copy){
+    dependsOn(":myriad-scheduler:build")
     def subdir = new File(project.buildDir, "libs")
     if( !subdir.exists() ) { subdir.mkdirs() }
     from '../myriad-scheduler/build/libs'
     into 'libs'
 }
 
-task copyMyriadJars(dependsOn: ":myriad-scheduler:build") {
+task copyExecutorJars(type:Copy){
     dependsOn(":myriad-executor:build")
+    def subdir = new File(project.buildDir, "libs")
+    if( !subdir.exists() ) { subdir.mkdirs() }
+    from '../myriad-executor/build/libs'
+    into 'libs'
+}
+
+
+task copyMyriadJars(dependsOn: ":myriad-scheduler:build") {
     dependsOn(copySchedulerJars)
+    dependsOn(copyExecutorJars)
 }
 
 task buildRMDocker(type: DockerBuildImage) {
@@ -46,5 +56,9 @@ task buildRMDocker(type: DockerBuildImage) {
         url = 'unix:///var/run/docker.sock'
     }  
     inputDir = file('.')
-    tag = 'mesos/myriad'
+    if(!project.hasProperty('dockerTag')) {
+        tag = 'mesos/myriad'
+    } else {
+        tag = dockerTag
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/myriad-bin/configure-yarn.sh
----------------------------------------------------------------------
diff --git a/docker/myriad-bin/configure-yarn.sh 
b/docker/myriad-bin/configure-yarn.sh
new file mode 100755
index 0000000..ff85ba9
--- /dev/null
+++ b/docker/myriad-bin/configure-yarn.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+: '
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+'
+##
+# YARN installer script for Apache Myriad Deployment
+##
+
+# Put in env defaults if they are missing
+export HADOOP_GROUP=${HADOOP_GROUP:='hadoop'}
+
+export YARN_USER=${YARN_USER:='yarn'}
+export USER_UID=${USER_UID:='107'}
+export YARN_GROUP=${YARN_GROUP:='yarn'}
+export HADOOP_GID=${HADOOP_GID:='112'}
+export YARN_GID=${YARN_GID:='113'}
+export HADOOP_HOME=${HADOOP_HOME:='/usr/local/hadoop'}
+
+# Add hduser user
+groupadd ${HADOOP_GROUP} -g ${HADOOP_GID}
+groupadd ${YARN_GROUP} -g ${YARN_GID}
+useradd ${YARN_USER} -g ${YARN_GROUP} -G ${HADOOP_GROUP} -u ${USER_UID} -s 
/bin/bash
+mkdir /home/${HADOOP_USER}
+chown -R ${YARN_USER}:${YARN_GROUP} /home/${YARN_USER}
+
+#set permissions
+chown -R root:root ${HADOOP_HOME}
+chmod -R g-w /usr/local/
+chown root:${YARN_GROUP} ${HADOOP_HOME}/bin/container-executor
+chmod 6050 ${HADOOP_HOME}/bin/container-executor
+
+# Init bashrc with hadoop env variables
+sh -c 'echo export JAVA_HOME=/usr >> /home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export HADOOP_HOME=\${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export PATH=\$PATH:\${HADOOP_HOME}/bin >> 
/home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export PATH=\$PATH:\${HADOOP_HOME}/sbin >> 
/home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export HADOOP_MAPRED_HOME=\${HADOOP_HOME} >> 
/home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export HADOOP_COMMON_HOME=\${HADOOP_HOME} >> 
/home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export HADOOP_HDFS_HOME=\${HADOOP_HOME} >> 
/home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export YARN_HOME=\${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export HADOOP_COMMON_LIB_NATIVE_DIR=\$\{HADOOP_HOME\}/lib/native 
>> /home/${HADOOP_USER}/.bashrc'
+sh -c 'echo export HADOOP_OPTS=\"-Djava.library.path=\${HADOOP_HOME}/lib\" >> 
/home/${HADOOP_USER}/.bashrc'
+
+
+echo "end of configure-yarn.sh script"

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/myriad-bin/create-user.sh
----------------------------------------------------------------------
diff --git a/docker/myriad-bin/create-user.sh b/docker/myriad-bin/create-user.sh
deleted file mode 100755
index c6c1bf6..0000000
--- a/docker/myriad-bin/create-user.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-: '
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
-'
-# Put in env defaults if they are missing
-export HADOOP_GROUP=${HADOOP_GROUP:='hadoop'}
-export HADOOP_USER=${HADOOP_USER:='yarn'}
-export HADOOP_HOME=${HADOOP_HOME:='/usr/local/hadoop'}
-export USER_UID=${USER_UID:='113'}
-export GROUP_UID=${GROUP_GID:='112'}
-
-# Add hduser user
-echo "Creating $HADOOP_USER user.."
-groupadd $HADOOP_GROUP -g ${GROUP_UID}
-useradd $HADOOP_USER -g $HADOOP_GROUP -u ${USER_UID} -s /bin/bash -d 
/home/${HADOOP_USER}
-mkdir /home/${HADOOP_USER}
-chown -R $HADOOP_USER:$HADOOP_GROUP /home/${HADOOP_USER}
-
-echo "end of create-user.sh script"

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/myriad-bin/install-yarn.sh
----------------------------------------------------------------------
diff --git a/docker/myriad-bin/install-yarn.sh 
b/docker/myriad-bin/install-yarn.sh
index 41b23ed..90c2762 100755
--- a/docker/myriad-bin/install-yarn.sh
+++ b/docker/myriad-bin/install-yarn.sh
@@ -21,60 +21,28 @@ under the License.
 # YARN installer script for Apache Myriad Deployment
 ##
 HADOOP_VER="2.7.0"
-HADOOP_TARBALL_URL=http://apache.osuosl.org/hadoop/common/hadoop-${HADOOP_VER}/hadoop-${HADOOP_VER}.tar.gz
 
 echo "Installing Yarn...."
-if [ ! -z "$1" ];then
-  HADOOP_TARBALL_URL=$1
-  echo "Deleting previous hadoop home"
-  rm -rf ${HADOOP_HOME}
+if [ ! -z $1 ];then
+  HADOOP_URL=$1
+else
+  
HADOOP_URL=http://apache.osuosl.org/hadoop/common/hadoop-${HADOOP_VER}/hadoop-${HADOOP_VER}.tar.gz
 fi
-
-# Download the tarball
-wget -O /opt/hadoop.tgz ${HADOOP_TARBALL_URL}
-HADOOP_BASENAME=`basename ${HADOOP_TARBALL_URL} .tar.gz`
-
-# Put in env defaults if they are missing
-export HADOOP_GROUP=${HADOOP_GROUP:='hadoop'}
-export HADOOP_USER=${HADOOP_USER:='yarn'}
+HADOOP_TGZ=`basename ${HADOOP_URL}`
+HADOOP_BASENAME=`basename ${HADOOP_URL} .tar.gz`
 export HADOOP_HOME=${HADOOP_HOME:='/usr/local/hadoop'}
-export USER_UID=${USER_UID:='113'}
-export GROUP_UID=${GROUP_GID:='112'}
-
-# Add hduser user
-groupadd $HADOOP_GROUP -g ${GROUP_UID}
-useradd $HADOOP_USER -g $HADOOP_GROUP -u ${USER_UID} -s /bin/bash
-#mkdir /home/${HADOOP_USER}
-chown -R $HADOOP_USER:$HADOOP_GROUP /home/${HADOOP_USER}
 
 # Extract Hadoop
-tar vxzf /opt/hadoop.tgz -C /tmp
-#mv /tmp/hadoop-${HADOOP_VER} ${HADOOP_HOME}
-echo "Moving /tmp/hadoop-${HADOOP_BASENAME} to ${HADOOP_HOME}"
+echo "Downloading ${HADOOP_TGZ} from ${HADOOP_URL}"
+wget ${HADOOP_URL}
+tar xzf ${HADOOP_TGZ} -C /tmp
 mv /tmp/${HADOOP_BASENAME} ${HADOOP_HOME}
-ls -lath ${HADOOP_HOME}
-
-mkdir /home/$HADOOP_USER
-chown -R ${HADOOP_USER}:${HADOOP_GROUP} ${HADOOP_HOME}
-
-# Init bashrc with hadoop env variables
-sh -c 'echo export JAVA_HOME=/usr >> /home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export HADOOP_HOME=\${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export PATH=\$PATH:\${HADOOP_HOME}/bin >> 
/home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export PATH=\$PATH:\${HADOOP_HOME}/sbin >> 
/home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export HADOOP_MAPRED_HOME=\${HADOOP_HOME} >> 
/home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export HADOOP_COMMON_HOME=\${HADOOP_HOME} >> 
/home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export HADOOP_HDFS_HOME=\${HADOOP_HOME} >> 
/home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export YARN_HOME=\${HADOOP_HOME} >> /home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export HADOOP_COMMON_LIB_NATIVE_DIR=\$\{HADOOP_HOME\}/lib/native 
>> /home/${HADOOP_USER}/.bashrc'
-sh -c 'echo export HADOOP_OPTS=\"-Djava.library.path=\${HADOOP_HOME}/lib\" >> 
/home/${HADOOP_USER}/.bashrc'
+#Remove tarball
+rm -f ${HADOOP_TGZ}
 
 # Link Mesos Libraries
-touch ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh
 echo "export JAVA_HOME=/usr" >> ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh
 echo "export MESOS_NATIVE_JAVA_LIBRARY=/usr/local/lib/libmesos.so" >> 
${HADOOP_HOME}/etc/hadoop/hadoop-env.sh
-
 # Ensure the hadoop-env is executable
 chmod +x ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh
-
 echo "end of install-yarn.sh script"

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/myriad-etc/mapred-site.xml.template
----------------------------------------------------------------------
diff --git a/docker/myriad-etc/mapred-site.xml.template 
b/docker/myriad-etc/mapred-site.xml.template
new file mode 100644
index 0000000..b21660f
--- /dev/null
+++ b/docker/myriad-etc/mapred-site.xml.template
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+<!-- This option enables dynamic port assignment by Mesos -->
+<configuration>
+<property>
+  <name>mapreduce.shuffle.port</name>
+  <value>${myriad.mapreduce.shuffle.port}</value>
+</property>
+<property>
+  <name>mapreduce.framework.name</name>
+  <value>yarn</value>
+</property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/myriad-etc/myriad-config-default.yml.template
----------------------------------------------------------------------
diff --git a/docker/myriad-etc/myriad-config-default.yml.template 
b/docker/myriad-etc/myriad-config-default.yml.template
new file mode 100644
index 0000000..6a1e52a
--- /dev/null
+++ b/docker/myriad-etc/myriad-config-default.yml.template
@@ -0,0 +1,113 @@
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+##
+
+# Address of the mesos master - <IP:port> or ZooKeeper path
+mesosMaster: zk://CHANGEME:2181/mesos
+#Container information for the node managers
+containerInfo:
+    type: DOCKER
+    dockerInfo:
+        image: CHANGEME/myriad
+    volume:
+        -
+          containerPath: /tmp
+          hostPath: /tmp
+# Whether to check point myriad's mesos framework or not
+checkpoint: false
+# Myriad's mesos framework failover timeout in milliseconds. This tells mesos
+# to expect myriad would failover within this time interval.
+frameworkFailoverTimeout: 60000 # 60 seconds
+# Myriad's mesos framework name.
+frameworkName: MyriadAlpha
+# Myriad's mesos framework role.
+#frameworkRole: myriad
+# User the Node Manager will run as (Defaults to user running the resource 
manager if absent,  necessary for remote distribution).
+frameworkUser: yarn
+# User that gets the nodeManagerUri and sets up the directories for Node 
Manager, must have passwordless sudo (Necessary only for remote distribution, 
otherwise ignored).
+# frameworkSuperUser: root
+# Myriad's REST-ful services port mapping.
+restApiPort: 8192
+# Address of the ZK ensemble (separate by comma, if multiple zk servers are 
used)
+#zkServers: localhost:2181
+# ZK Session timeout
+zkTimeout: 20000
+haEnabled: True
+# The node manager profiles. The REST API to flex up expects one of the 
profiles defined here.
+# Admin can define custom profiles (requires restart of Resource Manager)
+nmInstances:
+  medium: 1
+profiles:
+  zero:
+    cpu: 0
+    mem: 0
+  small:
+    cpu: 1
+    mem: 1024
+  medium:
+    cpu: 2
+    mem: 2048
+  large:
+    cpu: 4
+    mem: 4096
+  extralarge:
+    cpu: 6
+    mem: 6144
+# Whether to turn on myriad's auto-rebalancer feature.
+# Currently it's work-in-progress and should be set to 'false'.   
+rebalancer: false
+# Properties for the Node Manager process that's launched by myriad as a 
result of 'flex up' REST call.
+nodemanager:
+  jvmMaxMemoryMB: 1024  # Xmx for NM JVM process.
+  cpus: 0.2             # CPU needed by NM process.
+#  cgroups: true         # Whether NM should support CGroups. If set to 
'true', myriad automatically
+                        # configures yarn-site.xml to attach YARN's cgroups 
under Mesos' cgroup hierarchy.
+executor:
+  jvmMaxMemoryMB: 256   # Xmx for myriad's executor that launches Node Manager.
+  #path: hdfs://172.31.2.176:54310/dist/myriad-executor-runnable-x.x.x.jar # 
Path for the myriad's executor binary.
+  nodeManagerUri: hdfs://172.31.2.167:54310/dist/hadoop-2.6.0.tgz # the uri to 
d/l hadoop from   # Path to the Hadoop tarball
+# Environment variables required to launch Node Manager process. Admin can 
also pass other environment variables to NodeManager.
+yarnEnvironment:
+  YARN_HOME: /usr/local/hadoop # Or /usr/local/hadoop if using MapR's Hadoop
+  #YARN_HOME: hadoop-2.6.0 # Should be relative if nodeManagerUri is set
+  #YARN_NODEMANAGER_OPTS: -Dnodemanager.resource.io-spindles=4.0 # Required 
only if using MapR's Hadoop
+  JAVA_HOME: /usr/ # System dependent, but sometimes necessary
+services:
+   jobhistory:
+     command: $YARN_HOME/bin/mapred historyserver
+     jvmMaxMemoryMB: 1024
+     cpus: 1
+     maxInstances: 1
+     #ports:
+      # myriad.mapreduce.jobhistory.admin.address: -1
+      # myriad.mapreduce.jobhistory.address: -1
+      # myriad.mapreduce.jobhistory.webapp.address: -1
+     envSettings: -Dcluster.name.prefix=/mycluster
+     taskName: jobhistory
+  timelineserver:
+     command: $YARN_HOME/bin/yarn timelineserver
+     jvmMaxMemoryMB: 1024
+     cpus: 1
+     envSettings: -Dcluster.name.prefix=/mycluster2
+     taskName: timelineserver
+
+
+# Authentication principal for Myriad's mesos framework
+#mesosAuthenticationPrincipal: some_principal
+# Authentication secret filename for Myriad's mesos framework
+#mesosAuthenticationSecretFilename: /path/to/secret/filename

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/docker/myriad-etc/yarn-site.xml.template
----------------------------------------------------------------------
diff --git a/docker/myriad-etc/yarn-site.xml.template 
b/docker/myriad-etc/yarn-site.xml.template
new file mode 100644
index 0000000..c81b124
--- /dev/null
+++ b/docker/myriad-etc/yarn-site.xml.template
@@ -0,0 +1,162 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+
+
+<configuration>
+
+  <!--The hostname of the Resource Manager, this MUST be changed! -->
+  <!--If you plan to deploy via marathon, use <taskId>.marathon.mesos where 
taskId is the name of your marathon task-->
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>CHANGEME</value>
+  </property>
+
+  <!-- Necessary for log aggregation -->
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <description>Where to aggregate logs to.</description>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/var/log/hadoop-yarn/apps</value>
+  </property>
+
+  <!-- Aux Services needed for Map/Reduce and Myriad -->
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle,myriad_executor</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.store.class</name>
+    
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.MyriadFileSystemRMStateStore</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.fs.state-store.uri</name>
+    <value>/var/yarn/rm/system</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.recovery.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.myriad_executor.class</name>
+    <value>org.apache.myriad.executor.MyriadExecutorAuxService</value>
+  </property>
+
+  <!-- Myriad specific YARN configuration properties-->
+  <property>
+    <name>yarn.nodemanager.resource.cpu-vcores</name>
+    <value>${nodemanager.resource.cpu-vcores}</value>
+   </property>
+   <property>
+     <name>yarn.scheduler.minimum-allocation-vcores</name>
+     <value>0</value>
+   </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>${nodemanager.resource.memory-mb}</value>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>0</value>
+  </property>
+
+  <!-- Dynamic Port Assignment enabled by Mesos -->
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>${myriad.yarn.nodemanager.address}</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.webapp.address</name>
+    <value>${myriad.yarn.nodemanager.webapp.address}</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.webapp.https.address</name>
+    <value>${myriad.yarn.nodemanager.webapp.address}</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.localizer.address</name>
+    <value>${myriad.yarn.nodemanager.localizer.address}</value>
+  </property>
+
+  <!-- Myriad Scheduler configuration -->
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.myriad.scheduler.yarn.MyriadFairScheduler</value>
+  </property>
+
+  <!-- Cgroups configuration -->
+  <!--
+  <property>
+    <description>who will execute(launch) the containers.</description>
+    <name>yarn.nodemanager.container-executor.class</name>
+    
<value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
+  </property>
+
+  <property>
+    <description>The class which should help the LCE handle 
resources.</description>
+    
<name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
+    
<value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value>
+  </property>
+
+  <property>
+    <description>The class which should help the LCE handle 
resources.</description>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>yarn</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
+    <value></value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
+    
<value>${yarn.nodemanager.linux-container-executor.cgroups.mount-path}</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>root</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.path</name>
+    <value>${yarn.home}/bin/container-executor</value>
+  </property>
+-->
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/build.gradle
----------------------------------------------------------------------
diff --git a/myriad-scheduler/build.gradle b/myriad-scheduler/build.gradle
index fc7516d..3e88c03 100644
--- a/myriad-scheduler/build.gradle
+++ b/myriad-scheduler/build.gradle
@@ -38,7 +38,6 @@ dependencies {
     compile "org.apache.commons:commons-lang3:3.4"
     compile 'com.google.inject.extensions:guice-multibindings:3.0'
     testCompile 
"org.apache.hadoop:hadoop-yarn-server-resourcemanager:${hadoopVer}:tests"
-
 }
 
 configurations {

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadConfiguration.java
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadConfiguration.java
 
b/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadConfiguration.java
index 67c8232..716a20a 100644
--- 
a/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadConfiguration.java
+++ 
b/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadConfiguration.java
@@ -22,6 +22,7 @@ import com.fasterxml.jackson.annotation.JsonProperty;
 import com.google.common.base.Optional;
 import com.google.common.base.Strings;
 import java.util.Map;
+
 import org.codehaus.jackson.map.annotate.JsonSerialize;
 import org.hibernate.validator.constraints.NotEmpty;
 
@@ -53,8 +54,27 @@ import org.hibernate.validator.constraints.NotEmpty;
  * executor:
  * jvmMaxMemoryMB: 256
  * path: 
file://localhost/usr/local/libexec/mesos/myriad-executor-runnable-0.1.0.jar
+ * containerInfo:
+ *   DockerInfo:
+ *     image: mesos/myriad
+ *     forcePullImage: false
+ *     parameters:
+ *       -
+ *         key: key
+ *         value: value
+ *       -
+ *         key: key
+ *         value: value
+ *   volumes:
+ *     -
+ *       containerPath: path
+ *       hostPath: path
+ *       mode: RW
+ *     -
+ *       containerPath: path
+ *       hostPath: path
  * yarnEnvironment:
- * YARN_HOME: /usr/local/hadoop
+ *   YARN_HOME: /usr/local/hadoop
  */
 public class MyriadConfiguration {
   /**
@@ -93,6 +113,9 @@ public class MyriadConfiguration {
   private Boolean checkpoint;
 
   @JsonProperty
+  private MyriadContainerConfiguration containerInfo;
+
+  @JsonProperty
   private Double frameworkFailoverTimeout;
 
   @JsonProperty
@@ -160,7 +183,6 @@ public class MyriadConfiguration {
   public MyriadConfiguration() {
   }
 
-
   public String getMesosMaster() {
     return mesosMaster;
   }
@@ -169,6 +191,10 @@ public class MyriadConfiguration {
     return this.checkpoint != null ? checkpoint : DEFAULT_CHECKPOINT;
   }
 
+  public Optional<MyriadContainerConfiguration> getContainerInfo() {
+    return Optional.fromNullable(containerInfo);
+  }
+
   public Double getFrameworkFailoverTimeout() {
     return this.frameworkFailoverTimeout != null ? 
this.frameworkFailoverTimeout : DEFAULT_FRAMEWORK_FAILOVER_TIMEOUT_MS;
   }

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadContainerConfiguration.java
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadContainerConfiguration.java
 
b/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadContainerConfiguration.java
new file mode 100644
index 0000000..6a6a8bc
--- /dev/null
+++ 
b/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadContainerConfiguration.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.myriad.configuration;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.google.common.base.Optional;
+import org.hibernate.validator.constraints.NotEmpty;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * MyriadContainerConfiguration
+ * Provides deserialization support for containerInfo data stored in yaml 
config
+ */
+public class MyriadContainerConfiguration {
+  @JsonProperty
+  @NotEmpty
+  private String type;
+  @JsonProperty
+  private MyriadDockerConfiguration dockerInfo;
+  @JsonProperty
+  private List<Map<String, String>> volumes;
+
+  @JsonProperty
+  public String getType() {
+    return type;
+  }
+
+  public Optional<MyriadDockerConfiguration> getDockerInfo() {
+    return Optional.fromNullable(dockerInfo);
+  }
+
+  public Iterable<Map<String, String>> getVolumes() {
+    return volumes == null ? new ArrayList<Map<String, String>>() : volumes;
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadDockerConfiguration.java
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadDockerConfiguration.java
 
b/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadDockerConfiguration.java
new file mode 100644
index 0000000..38a2f0f
--- /dev/null
+++ 
b/myriad-scheduler/src/main/java/org/apache/myriad/configuration/MyriadDockerConfiguration.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.myriad.configuration;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.hibernate.validator.constraints.NotEmpty;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * MyriadDockerConfiguration
+ * Provides deserialization support for dockerInfo data stored in yaml config
+ */
+public class MyriadDockerConfiguration {
+  @JsonProperty
+  @NotEmpty
+  String image;
+
+  @JsonProperty
+  String network;
+
+  @JsonProperty
+  Boolean privledged;
+
+  @JsonProperty
+  Boolean forcePullImage;
+
+  @JsonProperty
+  List<Map<String, String>> parameters;
+
+  public String getImage() {
+    return image;
+  }
+
+  public Boolean getForcePullImage() {
+    return forcePullImage != null ? forcePullImage : false;
+  }
+
+  public String getNetwork() {
+    return network != null ? network : "HOST";
+  }
+
+  public Boolean getPrivledged() {
+    return privledged != null ? privledged : false;
+  }
+
+  public List<Map<String, String>> getParameters() {
+    if (parameters == null) {
+      return new ArrayList<>();
+    } else {
+      return parameters;
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/ServiceTaskFactoryImpl.java
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/ServiceTaskFactoryImpl.java
 
b/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/ServiceTaskFactoryImpl.java
index 076ebbb..dd86ac0 100644
--- 
a/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/ServiceTaskFactoryImpl.java
+++ 
b/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/ServiceTaskFactoryImpl.java
@@ -140,6 +140,9 @@ public class ServiceTaskFactoryImpl implements TaskFactory {
       
taskBuilder.addResources(Resource.newBuilder().setName("ports").setType(Value.Type.RANGES).setRanges(valueRanger.build()));
     }
     taskBuilder.setCommand(commandInfo);
+    if (cfg.getContainerInfo().isPresent()) {
+      taskBuilder.setContainer(taskUtils.getContainerInfo());
+    }
     return taskBuilder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskFactory.java
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskFactory.java 
b/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskFactory.java
index ecf0276..9ed54a7 100644
--- 
a/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskFactory.java
+++ 
b/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskFactory.java
@@ -201,10 +201,13 @@ public interface TaskFactory {
       ExecutorID executorId = ExecutorID.newBuilder()
           .setValue(EXECUTOR_PREFIX + frameworkId.getValue() + 
offer.getId().getValue() + offer.getSlaveId().getValue())
           .build();
-      return 
ExecutorInfo.newBuilder().setCommand(commandInfo).setName(EXECUTOR_NAME).setExecutorId(executorId)
-          .addAllResources(taskUtils.getScalarResource(offer, "cpus", 
taskUtils.getExecutorCpus(), 0.0))
-          .addAllResources(taskUtils.getScalarResource(offer, "mem", 
taskUtils.getExecutorMemory(), 0.0))
-          .build();
+      ExecutorInfo.Builder executorInfo = 
ExecutorInfo.newBuilder().setCommand(commandInfo).setName(EXECUTOR_NAME).setExecutorId(executorId)
+              .addAllResources(taskUtils.getScalarResource(offer, "cpus", 
taskUtils.getExecutorCpus(), 0.0))
+              .addAllResources(taskUtils.getScalarResource(offer, "mem", 
taskUtils.getExecutorMemory(), 0.0));
+      if (cfg.getContainerInfo().isPresent()) {
+        executorInfo.setContainer(taskUtils.getContainerInfo());
+      }
+      return executorInfo.build();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskUtils.java
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskUtils.java 
b/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskUtils.java
index 845c3be..1d9c518 100644
--- a/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskUtils.java
+++ b/myriad-scheduler/src/main/java/org/apache/myriad/scheduler/TaskUtils.java
@@ -18,11 +18,15 @@
  */
 package org.apache.myriad.scheduler;
 
+import com.google.common.base.Function;
 import com.google.common.base.Optional;
+import com.google.common.collect.Iterables;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.StringWriter;
-import java.util.ArrayList;
+import java.util.*;
+import javax.annotation.Nullable;
 import javax.inject.Inject;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
@@ -40,12 +44,9 @@ import javax.xml.xpath.XPathExpression;
 import javax.xml.xpath.XPathExpressionException;
 import javax.xml.xpath.XPathFactory;
 
+import com.google.common.base.Preconditions;
 import org.apache.mesos.Protos;
-import org.apache.myriad.configuration.MyriadBadConfigurationException;
-import org.apache.myriad.configuration.MyriadConfiguration;
-import org.apache.myriad.configuration.MyriadExecutorConfiguration;
-import org.apache.myriad.configuration.NodeManagerConfiguration;
-import org.apache.myriad.configuration.ServiceConfiguration;
+import org.apache.myriad.configuration.*;
 import org.apache.myriad.executor.MyriadExecutorDefaults;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,11 +61,19 @@ import org.xml.sax.SAXException;
  */
 public class TaskUtils {
   private static final Logger LOGGER = 
LoggerFactory.getLogger(TaskUtils.class);
-
   private static final String YARN_NODEMANAGER_RESOURCE_CPU_VCORES = 
"yarn.nodemanager.resource.cpu-vcores";
   private static final String YARN_NODEMANAGER_RESOURCE_MEMORY_MB = 
"yarn.nodemanager.resource.memory-mb";
+  private static final String CONTAINER_PATH_KEY = "containerPath";
+  private static final String HOST_PATH_KEY = "hostPath";
+  private static final String RW_MODE = "mode";
+  private static final String CONTAINER_PORT_KEY = "containerPort";
+  private static final String HOST_PORT_KEY = "hostPort";
+  private static final String PROTOCOL_KEY = "protocol";
+  private static final String PARAMETER_KEY_KEY = "key";
+  private static final String PARAMETER_VALUE_KEY = "value";
 
   private MyriadConfiguration cfg;
+  Random random = new Random();
 
   @Inject
   public TaskUtils(MyriadConfiguration cfg) {
@@ -208,14 +217,84 @@ public class TaskUtils {
     return auxConf.getJvmMaxMemoryMB().get();
   }
 
+  public TaskUtils() {
+    super();
+  }
+
+  public Iterable<Protos.Volume> getVolumes(Iterable<Map<String, String>> 
volume) {
+    return Iterables.transform(volume, new Function<Map<String, String>, 
Protos.Volume>() {
+      @Nullable
+      @Override
+      public Protos.Volume apply(Map<String, String> map) {
+        Preconditions.checkArgument(map.containsKey(HOST_PATH_KEY) && 
map.containsKey(CONTAINER_PATH_KEY));
+        Protos.Volume.Mode mode = Protos.Volume.Mode.RO;
+        if (map.containsKey(RW_MODE) && 
map.get(RW_MODE).toLowerCase().equals("rw")) {
+          mode = Protos.Volume.Mode.RW;
+        }
+        return Protos.Volume.newBuilder()
+            .setContainerPath(map.get(CONTAINER_PATH_KEY))
+            .setHostPath(map.get(HOST_PATH_KEY))
+            .setMode(mode)
+            .build();
+      }
+    });
+  }
+
+  public Iterable<Protos.Parameter> getParameters(Iterable<Map<String, 
String>> params) {
+    Preconditions.checkNotNull(params);
+    return Iterables.transform(params, new Function<Map<String, String>, 
Protos.Parameter>() {
+      @Override
+      public Protos.Parameter apply(Map<String, String> parameter) {
+        Preconditions.checkNotNull(parameter, "Null parameter");
+        Preconditions.checkState(parameter.containsKey(PARAMETER_KEY_KEY), 
"Missing key");
+        Preconditions.checkState(parameter.containsKey(PARAMETER_VALUE_KEY), 
"Missing value");
+        return Protos.Parameter.newBuilder()
+            .setKey(parameter.get(PARAMETER_KEY_KEY))
+            .setValue(parameter.get(PARAMETER_VALUE_KEY)) // look up the value; was the literal key name
+            .build();
+      }
+    });
+  }
+
+  private Protos.ContainerInfo.DockerInfo 
getDockerInfo(MyriadDockerConfiguration dockerConfiguration) {
+    
Preconditions.checkArgument(dockerConfiguration.getNetwork().equals("HOST"), 
"Currently only host networking supported");
+    Protos.ContainerInfo.DockerInfo.Builder dockerBuilder = 
Protos.ContainerInfo.DockerInfo.newBuilder()
+        .setImage(dockerConfiguration.getImage())
+        .setForcePullImage(dockerConfiguration.getForcePullImage())
+        
.setNetwork(Protos.ContainerInfo.DockerInfo.Network.valueOf(dockerConfiguration.getNetwork()))
+        .setPrivileged(dockerConfiguration.getPrivledged())
+        .addAllParameters(getParameters(dockerConfiguration.getParameters()));
+    return dockerBuilder.build();
+  }
+
+  /**
+   * Builds a ContainerInfo Object
+   *
+   * @return ContainerInfo
+   */
+  public Protos.ContainerInfo getContainerInfo() {
+    Preconditions.checkArgument(cfg.getContainerInfo().isPresent(), 
"ContainerConfiguration doesn't exist!");
+    MyriadContainerConfiguration containerConfiguration = 
cfg.getContainerInfo().get();
+    Protos.ContainerInfo.Builder containerBuilder = 
Protos.ContainerInfo.newBuilder()
+        
.setType(Protos.ContainerInfo.Type.valueOf(containerConfiguration.getType()))
+        .addAllVolumes(getVolumes(containerConfiguration.getVolumes()));
+    if (containerConfiguration.getDockerInfo().isPresent()) {
+      MyriadDockerConfiguration dockerConfiguration = 
containerConfiguration.getDockerInfo().get();
+      containerBuilder.setDocker(getDockerInfo(dockerConfiguration));
+    }
+    return containerBuilder.build();
+  }
+
+
   /**
    * Helper function that returns all scalar resources of a given name in an 
offer up to a given value.  Attempts to
    * take resource from the prescribed role first and then from the default 
role.  The variable used indicated any
    * resources previously requested.   Assumes enough resources are present.
+   *
    * @param offer - An offer by Mesos, assumed to have enough resources.
    * @param name  - The name of the SCALAR resource, i.e. cpus or mem
    * @param value - The amount of SCALAR resources needed.
-   * @param used - The amount of SCALAR resources already removed from this 
offer.
+   * @param used  - The amount of SCALAR resources already removed from this 
offer.
    * @return An Iterable containing one or two scalar resources of a given 
name in an offer up to a given value.
    */
   public Iterable<Protos.Resource> getScalarResource(Protos.Offer offer, 
String name, Double value, Double used) {

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/test/java/org/apache/myriad/scheduler/TestTaskUtils.java
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/test/java/org/apache/myriad/scheduler/TestTaskUtils.java 
b/myriad-scheduler/src/test/java/org/apache/myriad/scheduler/TestTaskUtils.java
index a04b94b..14e0a00 100644
--- 
a/myriad-scheduler/src/test/java/org/apache/myriad/scheduler/TestTaskUtils.java
+++ 
b/myriad-scheduler/src/test/java/org/apache/myriad/scheduler/TestTaskUtils.java
@@ -19,13 +19,17 @@ package org.apache.myriad.scheduler;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import com.google.common.collect.Range;
+import com.google.common.collect.Ranges;
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
+import org.apache.mesos.Protos;
 import org.apache.myriad.configuration.MyriadBadConfigurationException;
 import org.apache.myriad.configuration.MyriadConfiguration;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -35,12 +39,20 @@ import static org.junit.Assert.fail;
 public class TestTaskUtils {
 
   static MyriadConfiguration cfg;
+  static MyriadConfiguration cfgWithRole;
+  static MyriadConfiguration cfgWithDocker;
+  static double epsilon = .0001;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     ObjectMapper mapper = new ObjectMapper(new YAMLFactory());
     cfg = 
mapper.readValue(Thread.currentThread().getContextClassLoader().getResource("myriad-config-test-default.yml"),
         MyriadConfiguration.class);
+    cfgWithRole = 
mapper.readValue(Thread.currentThread().getContextClassLoader().getResource("myriad-config-test-default-with-framework-role.yml"),
+        MyriadConfiguration.class);
+    cfgWithDocker = 
mapper.readValue(Thread.currentThread().getContextClassLoader().getResource("myriad-config-test-default-with-docker-info.yml"),
+            MyriadConfiguration.class);
+
   }
 
   @AfterClass
@@ -74,7 +86,6 @@ public class TestTaskUtils {
     Gson gson = new 
GsonBuilder().registerTypeAdapter(ServiceResourceProfile.class, new 
ServiceResourceProfile.CustomDeserializer())
         .create();
 
-
     ServiceResourceProfile parentProfile = new ServiceResourceProfile("abc", 
1.0, 100.0);
 
     String parentStr = gson.toJson(parentProfile);
@@ -98,4 +109,85 @@ public class TestTaskUtils {
 
     new Throwable().printStackTrace();
   }
+  private Protos.Offer createScalarOffer(String name, double roleVal, double 
defaultVal) {
+    Protos.Offer offer = Protos.Offer.newBuilder()
+        .setId(Protos.OfferID.newBuilder().setValue("offerId"))
+        .setSlaveId(Protos.SlaveID.newBuilder().setValue("slaveId"))
+        .setHostname("test.com")
+        
.setFrameworkId(Protos.FrameworkID.newBuilder().setValue("frameworkId"))
+        .addResources(
+            Protos.Resource.newBuilder()
+                .setScalar(Protos.Value.Scalar.newBuilder().setValue(roleVal))
+                .setType(Protos.Value.Type.SCALAR)
+                .setName(name)
+                .setRole("test")
+                .build())
+        .addResources(
+            Protos.Resource.newBuilder()
+                
.setScalar(Protos.Value.Scalar.newBuilder().setValue(defaultVal))
+                .setType(Protos.Value.Type.SCALAR)
+                .setName("cpus")
+                .build())
+        .build();
+    return offer;
+  }
+
+  private void checkResourceList(Iterable<Protos.Resource> resources, String 
name, Double roleVal, Double defaultVal) {
+    int i = 0;
+    Range defaultValueRange = Ranges.closed(defaultVal - epsilon, defaultVal + 
epsilon);
+    Range roleValueRange = Ranges.closed(roleVal - epsilon, roleVal + epsilon);
+
+    for (Protos.Resource resource: resources) {
+      if (resource.hasRole() && resource.getRole().equals("test")) {
+        double v = resource.getScalar().getValue();
+        assertTrue("Test Role  has " + v + " " + name + " should have " + 
roleVal, roleValueRange.contains(v));
+        i++;
+      } else {
+        double v = resource.getScalar().getValue();
+        assertTrue("Default Role has " + v + " " + name + " should have " + 
defaultVal , defaultValueRange.contains(v));
+        i++;
+      }
+    }
+    assertTrue("There should be at most 2 resources", i <= 2);
+  }
+
+  @Test
+  public void testGetScalarResourcesWithRole() {
+    TaskUtils taskUtils = new TaskUtils(cfgWithRole);
+    checkResourceList(taskUtils.getScalarResource(createScalarOffer("cpus", 
3.0, 2.0), "cpus", 1.0, 0.0), "cpus", 1.0, 0.0);
+    checkResourceList(taskUtils.getScalarResource(createScalarOffer("cpus", 
0.0, 2.0), "cpus", 1.0, 1.0), "cpus", 0.0, 1.0);
+    checkResourceList(taskUtils.getScalarResource(createScalarOffer("cpus", 
1.5, 2.0), "cpus", 2.0, 1.0), "cpus", 0.5, 1.5);
+    checkResourceList(taskUtils.getScalarResource(createScalarOffer("cpus", 
1.5, 2.0), "cpus", 1.5, 2.0), "cpus", 0.0, 1.5);
+  }
+
+  @Test
+  public void testGetScalarResources() {
+    TaskUtils taskUtils = new TaskUtils(cfg);
+    checkResourceList(taskUtils.getScalarResource(createScalarOffer("cpus", 
0.0, 2.0), "cpus", 1.0, 0.0), "cpus", 0.0, 1.0);
+    checkResourceList(taskUtils.getScalarResource(createScalarOffer("cpus", 
0.0, 2.0), "cpus", 1.0, 1.0), "cpus", 0.0, 1.0);
+    checkResourceList(taskUtils.getScalarResource(createScalarOffer("cpus", 
0.0, 2.0), "cpus", 1.0, 1.0), "cpus", 0.0, 1.0);
+    checkResourceList(taskUtils.getScalarResource(createScalarOffer("cpus", 
0.0, 2.0), "cpus", 0.5, 1.5), "cpus", 0.0, 0.5);
+  }
+
+  @Test
+  public void testContainerInfo() {
+    TaskUtils taskUtils = new TaskUtils(cfgWithDocker);
+    Protos.ContainerInfo containerInfo = taskUtils.getContainerInfo();
+    assertTrue("The container should have a docker", 
containerInfo.hasDocker());
+    assertTrue("There should be two volumes", containerInfo.getVolumesCount() 
== 2);
+    assertTrue("The first volume should be read only", 
containerInfo.getVolumes(0).getMode().equals(Protos.Volume.Mode.RO));
+    assertTrue("The first volume should be read write", 
containerInfo.getVolumes(1).getMode().equals(Protos.Volume.Mode.RW));
+  }
+
+  @Test public void testDockerInfo() {
+    TaskUtils taskUtils = new TaskUtils(cfgWithDocker);
+    Protos.ContainerInfo containerInfo = taskUtils.getContainerInfo();
+    assertTrue("The container should have a docker", 
containerInfo.hasDocker());
+    assertTrue("There should be two volumes", 
containerInfo.getVolumesList().size() == 2);
+    assertTrue("There should be a docker image", 
containerInfo.getDocker().hasImage());
+    assertTrue("The docker image should be mesos/myraid", 
containerInfo.getDocker().getImage().equals("mesos/myriad"));
+    assertTrue("Should be using host networking", 
containerInfo.getDocker().getNetwork().equals(Protos.ContainerInfo.DockerInfo.Network.HOST));
+    assertTrue("There should be two parameters", 
containerInfo.getDocker().getParametersList().size() == 2);
+    assertTrue("Privledged mode should be false", 
containerInfo.getDocker().getPrivileged() == false);
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/test/resources/myriad-config-test-default-with-docker-info.yml
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/test/resources/myriad-config-test-default-with-docker-info.yml
 
b/myriad-scheduler/src/test/resources/myriad-config-test-default-with-docker-info.yml
new file mode 100644
index 0000000..221607a
--- /dev/null
+++ 
b/myriad-scheduler/src/test/resources/myriad-config-test-default-with-docker-info.yml
@@ -0,0 +1,90 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+mesosMaster: 10.0.2.15:5050
+checkpoint: false
+frameworkFailoverTimeout: 43200000
+frameworkName: MyriadTest
+frameworkRole: test
+frameworkUser: hduser     # User the Node Manager runs as, required if 
nodeManagerURI set, otherwise defaults to the user
+                          # running the resource manager.
+frameworkSuperUser: root  # To be deprecated; currently permissions need to 
be set by a superuser due to Mesos-1790.  Must be
+                          # root or have passwordless sudo. Required if 
nodeManagerURI set, ignored otherwise.
+nativeLibrary: /usr/local/lib/libmesos.so
+zkServers: localhost:2181
+zkTimeout: 20000
+restApiPort: 8192
+profiles:
+  small:
+    cpu: 1
+    mem: 1100
+  medium:
+    cpu: 2
+    mem: 2048
+  large:
+    cpu: 4
+    mem: 4096
+rebalancer: false
+nodemanager:
+  jvmMaxMemoryMB: 1024
+  cpus: 0.2
+  cgroups: false
+executor:
+  jvmMaxMemoryMB: 256
+  path: file:///usr/local/libexec/mesos/myriad-executor-runnable-0.1.0.jar
+  #The following should be used for a remotely distributed URI, hdfs assumed 
but other URI types valid.
+  nodeManagerUri: hdfs://namenode:port/dist/hadoop-2.7.0.tar.gz
+  #path: hdfs://namenode:port/dist/myriad-executor-runnable-0.1.0.jar
+yarnEnvironment:
+  YARN_HOME: /usr/local/hadoop
+  #YARN_HOME: hadoop-2.7.0 #this should be relative if nodeManagerUri is set
+  #JAVA_HOME: /usr/lib/jvm/java-default #System dependent, but sometimes 
necessary
+services:
+ jobhistory:
+   jvmMaxMemoryMB: 1024
+   cpus: 1
+   ports:
+     myriad.mapreduce.jobhistory.admin.address: 0
+     myriad.mapreduce.jobhistory.address: 0
+     myriad.mapreduce.jobhistory.webapp.address: 0
+   envSettings: -Dcluster.name.prefix=/mycluster
+   taskName: jobhistory
+ timelineserver:
+   jvmMaxMemoryMB: 1024
+   cpus: 1
+   envSettings: -Dcluster.name.prefix=/mycluster2
+   taskName: timelineserver
+containerInfo:
+ type: DOCKER
+ dockerInfo:
+   image: mesos/myriad
+   parameters:
+     -
+       key: volume-driver
+       value: keywhiz
+     -
+       key: volume
+       value: all-my-secrets:/etc/secrets
+ volumes:
+   -
+     hostPath: /srv/data1/hadoop-yarn
+     containerPath: /data1/hadoop-yarn
+     mode: RO
+   -
+     hostPath: /srv/data2/hadoop-yarn
+     containerPath: /data2/hadoop-yarn
+     mode: RW
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-myriad/blob/bcd1fc8c/myriad-scheduler/src/test/resources/myriad-config-test-default-with-framework-role.yml
----------------------------------------------------------------------
diff --git 
a/myriad-scheduler/src/test/resources/myriad-config-test-default-with-framework-role.yml
 
b/myriad-scheduler/src/test/resources/myriad-config-test-default-with-framework-role.yml
new file mode 100644
index 0000000..ab7f137
--- /dev/null
+++ 
b/myriad-scheduler/src/test/resources/myriad-config-test-default-with-framework-role.yml
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+mesosMaster: 10.0.2.15:5050
+checkpoint: false
+frameworkFailoverTimeout: 43200000
+frameworkName: MyriadTest
+frameworkRole: test
+frameworkUser: hduser     # User the Node Manager runs as, required if 
nodeManagerUri is set, otherwise defaults to the user
+                          # running the resource manager.
+frameworkSuperUser: root  # To be deprecated; currently permissions need to 
be set by a superuser due to MESOS-1790.  Must be
+                          # root or have passwordless sudo. Required if 
nodeManagerUri is set, ignored otherwise.
+nativeLibrary: /usr/local/lib/libmesos.so
+zkServers: localhost:2181
+zkTimeout: 20000
+restApiPort: 8192
+profiles:
+  small:
+    cpu: 1
+    mem: 1100
+  medium:
+    cpu: 2
+    mem: 2048
+  large:
+    cpu: 4
+    mem: 4096
+rebalancer: false
+nodemanager:
+  jvmMaxMemoryMB: 1024
+  cpus: 0.2
+  cgroups: false
+executor:
+  jvmMaxMemoryMB: 256
+  path: file:///usr/local/libexec/mesos/myriad-executor-runnable-0.1.0.jar
+  #The following should be used for a remotely distributed URI; hdfs is 
assumed, but other URI types are valid.
+  nodeManagerUri: hdfs://namenode:port/dist/hadoop-2.7.0.tar.gz
+  #path: hdfs://namenode:port/dist/myriad-executor-runnable-0.1.0.jar
+yarnEnvironment:
+  YARN_HOME: /usr/local/hadoop
+  #YARN_HOME: hadoop-2.7.0 #this should be relative if nodeManagerUri is set
+  #JAVA_HOME: /usr/lib/jvm/java-default #System dependent, but sometimes 
necessary
+services:
+   jobhistory:
+     jvmMaxMemoryMB: 1024
+     cpus: 1
+     ports:
+       myriad.mapreduce.jobhistory.admin.address: 0
+       myriad.mapreduce.jobhistory.address: 0
+       myriad.mapreduce.jobhistory.webapp.address: 0
+     envSettings: -Dcluster.name.prefix=/mycluster
+     taskName: jobhistory
+   timelineserver:
+     jvmMaxMemoryMB: 1024
+     cpus: 1
+     envSettings: -Dcluster.name.prefix=/mycluster2
+     taskName: timelineserver
\ No newline at end of file

Reply via email to