Repository: incubator-eagle
Updated Branches:
  refs/heads/master 6f29955ae -> 0637cad6c


[EAGLE-53] Initially add eagle-docker


Project: http://git-wip-us.apache.org/repos/asf/incubator-eagle/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-eagle/commit/db5d61a6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-eagle/tree/db5d61a6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-eagle/diff/db5d61a6

Branch: refs/heads/master
Commit: db5d61a668b808d0d53ad5af8c2b9a8ef30c5e9d
Parents: 5bffdfd
Author: qinzhaokun <qinzhao...@gmail.com>
Authored: Tue Nov 24 02:09:10 2015 -0700
Committer: qinzhaokun <qinzhao...@gmail.com>
Committed: Tue Nov 24 02:09:10 2015 -0700

----------------------------------------------------------------------
 eagle-external/eagle-docker/Dockerfile          |  24 +++
 eagle-external/eagle-docker/LICENSE             | 209 +++++++++++++++++++
 eagle-external/eagle-docker/README.md           |  54 +++++
 eagle-external/eagle-docker/deploy-eagle.sh     |  70 +++++++
 eagle-external/eagle-docker/eagle-functions     | 143 +++++++++++++
 .../eagle-docker/eagle-multinode.json           | 163 +++++++++++++++
 .../eagle-docker/eagle-singlenode.json          | 122 +++++++++++
 eagle-external/eagle-docker/install-cluster.sh  |  22 ++
 .../eagle-docker/serf/etc/ambari.json           |   9 +
 eagle-external/eagle-docker/serf/handlers/eagle |   7 +
 eagle-external/eagle-docker/wait-for-eagle.sh   |  23 ++
 11 files changed, 846 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/Dockerfile
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/Dockerfile 
b/eagle-external/eagle-docker/Dockerfile
new file mode 100644
index 0000000..12ebe0f
--- /dev/null
+++ b/eagle-external/eagle-docker/Dockerfile
@@ -0,0 +1,24 @@
+FROM sequenceiq/ambari:1.7.0
+
+MAINTAINER Zqin
+
+ENV EAGLE_DOWNLOAD_LINK http://66.211.190.194/eagle-0.1.0.tar.gz
+
+RUN curl -sL $EAGLE_DOWNLOAD_LINK | tar -xz -C /usr/local/
+RUN cd /usr/local && ln -s ./eagle-0.1.0 eagle
+ENV EAGLE_HOME=/usr/local/eagle
+
+
+RUN yum install -y httpd ganglia ganglia-gmetad ganglia-gmond ganglia-web 
nagios kafka zookeeper storm hbase tez hadoop snappy snappy-devel 
hadoop-libhdfs ambari-log4j hive hive-hcatalog hive-webhcat webhcat-tar-hive 
webhcat-tar-pig mysql-connector-java mysql-server
+
+ADD serf /usr/local/serf
+
+RUN mkdir -p /var/log/httpd
+
+ADD install-cluster.sh /tmp/
+ADD eagle-singlenode.json /tmp/
+ADD eagle-multinode.json /tmp/
+ADD wait-for-eagle.sh /tmp/
+ADD deploy-eagle.sh /usr/local/eagle/deploy.sh
+
+EXPOSE 9099 8744

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/LICENSE
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/LICENSE 
b/eagle-external/eagle-docker/LICENSE
new file mode 100644
index 0000000..b9c8eef
--- /dev/null
+++ b/eagle-external/eagle-docker/LICENSE
@@ -0,0 +1,209 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+==============================================================================
+Apache Eagle (incubating) Subcomponents:
+
+The Apache Eagle project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for the these
+subcomponents is subject to the terms and conditions of the following
+licenses.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/README.md
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/README.md 
b/eagle-external/eagle-docker/README.md
new file mode 100644
index 0000000..09b8747
--- /dev/null
+++ b/eagle-external/eagle-docker/README.md
@@ -0,0 +1,54 @@
+# Eagle in Docker
+
+> Docker image for Apache Eagle http://eagle.incubator.apache.org
+
+This is a docker container for Eagle that helps users get a quick preview of 
Eagle's features. 
+And this project is to build apache/eagle images and provide eagle-functions 
to start the containers of eagle.
+
+## Prerequisite
+* Docker environment (see [https://www.docker.com](https://www.docker.com/)) 
+
+## Installation & Usage
+1. **Build Image**: Go to the root directory where the 
[Dockerfile](Dockerfile) is in, build image with following command:
+ 
+        docker build -t apache/eagle . 
+ 
+    > The docker image is named `apache/eagle`. Eagle docker image is based on 
[`ambari:1.7.0`](https://github.com/sequenceiq/docker-ambari), it will install 
ganglia, hbase,hive,storm,kafka and so on in this image. Add startup script and 
blueprint file into the image. 
+
+2. **Verify Image**: After building the `apache/eagle` image successfully, 
verify the local images and confirm that the eagle image is present.
+
+        docker images
+
+3. **Deploy Image**: This project also provides helper functions in script 
[eagle-functions](eagle-functions) for convenience.
+  
+        # Firstly, load the helper functions into context
+        source eagle-functions
+            
+        # Secondly, start to deploy eagle cluster
+    
+        # (1) start single-node container
+        eagle-deploy-cluster 1 
+
+        # (2) Or multi-node containers
+        eagle-deploy-cluster 3 
+
+4. **Find IP and Port Mapping**: After the container is up and running. The 
first thing you need to do is finding the IP address and port mapping of the 
docker container:
+
+        docker inspect -f '{{ .NetworkSettings.IPAddress }}' eagle-server
+        docker ps
+
+5. **Start to use Eagle**: Congratulations! You are able to start using Eagle 
now. Please open eagle ui at following address (username: ADMIN, password: 
secret by default)
+
+        http://{{container_ip}}:9099  
+
+6. **Manage Eagle Cluster**: This step describes how to manage the eagle 
cluster; it is optional when getting started. Eagle docker depends on Ambari to 
manage the cluster infrastructure of Eagle. Following are some helpful links:
+
+  * Ambari UI: `http://{{container_ip}}:8080` (username: ADMIN, password: 
ADMIN)
+  * Storm UI: `http://{{container_ip}}:8744`
+
+## Get Help
+The fastest way to get a response from the eagle community is to send an email to the 
mail list 
[d...@eagle.incubator.apache.org](mailto:d...@eagle.incubator.apache.org),
+and remember to subscribe our mail list via 
[dev-subscr...@eagle.incubator.apache.org](mailto:dev-subscr...@eagle.incubator.apache.org)
+
+## License
+Licensed under the [Apache License, Version 
2.0](http://www.apache.org/licenses/LICENSE-2.0). More details, please refer to 
[LICENSE](LICENSE) file.

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/deploy-eagle.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/deploy-eagle.sh 
b/eagle-external/eagle-docker/deploy-eagle.sh
new file mode 100755
index 0000000..2b78fde
--- /dev/null
+++ b/eagle-external/eagle-docker/deploy-eagle.sh
@@ -0,0 +1,70 @@
+#!/bin/sh
+
+set -o pipefail  # trace ERR through pipes
+set -o errtrace  # trace ERR through 'time command' and other functions
+
+function error() {
+SCRIPT="$0"           # script name
+LASTLINE="$1"         # line of error occurrence
+LASTERR="$2"          # error code
+echo "ERROR exit from ${SCRIPT} : line ${LASTLINE} with exit code ${LASTERR}"
+exit 1
+}
+
+trap 'error ${LINENO} ${?}' ERR
+
+echo ""
+echo "Welcome to try Eagle"
+echo ""
+
+echo "Eagle home folder path is $EAGLE_HOME"
+cd $EAGLE_HOME
+
+
+echo "Initializing Eagle Service ..."
+sh ./bin/eagle-service-init.sh
+
+sleep 10
+
+echo "Starting Eagle Service ..."
+sh ./bin/eagle-service.sh start
+
+sleep 10
+
+echo "Creating kafka topics for eagle ... "
+KAFKA_HOME=/usr/hdp/current/kafka-broker
+EAGLE_ZOOKEEPER_QUORUM=localhost:2181
+topic=`${KAFKA_HOME}/bin/kafka-topics.sh --list --zookeeper 
$EAGLE_ZOOKEEPER_QUORUM --topic sandbox_hdfs_audit_log`
+if [ -z $topic ]; then
+        $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper 
$EAGLE_ZOOKEEPER_QUORUM --replication-factor 1 --partitions 1 --topic 
sandbox_hdfs_audit_log
+fi
+
+if [ $? = 0 ]; then
+echo "==> Create kafka topic successfully for eagle"
+else
+echo "==> Failed, exiting"
+exit 1
+fi
+
+EAGLE_NIMBUS_HOST=eagle-server.apache.org
+EAGLE_SERVICE_HOST=eagle-server.apache.org
+EAGLE_TOPOLOGY_JAR=`ls 
${EAGLE_HOME}/lib/topology/eagle-topology-*-assembly.jar`
+
+${EAGLE_HOME}/bin/eagle-topology-init.sh
+[ $? != 0 ] && exit 1
+${EAGLE_HOME}/examples/sample-sensitivity-resource-create.sh
+[ $? != 0 ] && exit 1
+${EAGLE_HOME}/examples/sample-policy-create.sh
+[ $? != 0 ] && exit 1
+storm jar $EAGLE_TOPOLOGY_JAR 
eagle.security.auditlog.HdfsAuditLogProcessorMain -D 
config.file=${EAGLE_HOME}/conf/sandbox-hdfsAuditLog-application.conf  -D 
eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
+[ $? != 0 ] && exit 1
+storm jar $EAGLE_TOPOLOGY_JAR 
eagle.security.hive.jobrunning.HiveJobRunningMonitoringMain -D 
config.file=${EAGLE_HOME}/conf/sandbox-hiveQueryLog-application.conf  -D 
eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
+[ $? != 0 ] && exit 1
+storm jar $EAGLE_TOPOLOGY_JAR 
eagle.security.userprofile.UserProfileDetectionMain -D 
config.file=${EAGLE_HOME}/conf/sandbox-userprofile-topology.conf  -D 
eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
+[ $? != 0 ] && exit 1
+
+# TODO: More eagle start
+
+echo "Eagle is deployed successfully!"
+
+echo "Please visit http://<your_sandbox_ip>:9099 to play with Eagle!"

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/eagle-functions
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-functions 
b/eagle-external/eagle-docker/eagle-functions
new file mode 100644
index 0000000..3895add
--- /dev/null
+++ b/eagle-external/eagle-docker/eagle-functions
@@ -0,0 +1,143 @@
+: ${NODE_PREFIX=eagle}
+: ${AMBARI_SERVER_NAME:=${NODE_PREFIX}-server}
+: ${MYDOMAIN:=apache.org}
+: ${IMAGE:="apache/eagle:latest"}
+: ${DOCKER_OPTS:="--dns 127.0.0.1 --entrypoint 
/usr/local/serf/bin/start-serf-agent.sh -e KEYCHAIN=$KEYCHAIN"}
+: ${CLUSTER_SIZE:=1}
+: ${DEBUG:=1}
+: ${SLEEP_TIME:=2}
+: ${DRY_RUN:=false}
+run-command() {
+  CMD="$@"
+  if [ "$DRY_RUN" == "false" ]; then
+    debug "$CMD"
+    "$@"
+  else
+    debug [DRY_RUN] "$CMD"
+  fi
+}
+
+amb-clean() {
+  unset NODE_PREFIX AMBARI_SERVER_NAME MYDOMAIN IMAGE DOCKER_OPTS DEBUG 
SLEEP_TIME AMBARI_SERVER_IP DRY_RUN
+}
+
+get-ambari-server-ip() {
+  AMBARI_SERVER_IP=$(get-host-ip ${AMBARI_SERVER_NAME})
+}
+
+get-host-ip() {
+  HOST=$1
+  docker inspect --format="{{.NetworkSettings.IPAddress}}" ${HOST}
+}
+
+amb-members() {
+  get-ambari-server-ip
+  serf members --rpc-addr $(docker inspect --format 
"{{.NetworkSettings.IPAddress}}" ${AMBARI_SERVER_NAME}):7373
+}
+
+amb-settings() {
+  cat <<EOF
+  NODE_PREFIX=$NODE_PREFIX
+  MYDOMAIN=$MYDOMAIN
+  CLUSTER_SIZE=$CLUSTER_SIZE
+  AMBARI_SERVER_NAME=$AMBARI_SERVER_NAME
+  IMAGE=$IMAGE
+  DOCKER_OPTS=$DOCKER_OPTS
+  AMBARI_SERVER_IP=$AMBARI_SERVER_IP
+  DRY_RUN=$DRY_RUN
+EOF
+}
+
+debug() {
+  [ $DEBUG -gt 0 ] && echo [DEBUG] "$@" 1>&2
+}
+
+docker-ps() {
+  #docker ps|sed "s/ \{3,\}/#/g"|cut -d '#' -f 1,2,7|sed "s/#/\t/g"
+  docker inspect --format="{{.Name}} {{.NetworkSettings.IPAddress}} 
{{.Config.Image}} {{.Config.Entrypoint}} {{.Config.Cmd}}" $(docker ps -q)
+}
+
+docker-psa() {
+  #docker ps|sed "s/ \{3,\}/#/g"|cut -d '#' -f 1,2,7|sed "s/#/\t/g"
+  docker inspect --format="{{.Name}} {{.NetworkSettings.IPAddress}} 
{{.Config.Image}} {{.Config.Entrypoint}} {{.Config.Cmd}}" $(docker ps -qa)
+}
+
+amb-start-cluster() {
+  local act_cluster_size=$1
+  : ${act_cluster_size:=$CLUSTER_SIZE}
+  echo starting an ambari cluster with: $act_cluster_size nodes
+
+  amb-start-first
+  [ $act_cluster_size -gt 1 ] && for i in $(seq $((act_cluster_size - 1))); do
+    amb-start-node $i
+  done
+}
+
+_amb_run_shell() {
+  COMMAND=$1
+  : ${COMMAND:? required}
+  get-ambari-server-ip
+  NODES=$(docker inspect --format="{{.Config.Image}} {{.Name}}" $(docker ps 
-q)|grep $IMAGE|grep $NODE_PREFIX|wc -l|xargs)
+  run-command docker run -it --rm -e EXPECTED_HOST_COUNT=$NODES -e 
BLUEPRINT=$BLUEPRINT --link ${AMBARI_SERVER_NAME}:ambariserver --entrypoint 
/bin/sh $IMAGE -c $COMMAND
+}
+
+amb-shell() {
+  _amb_run_shell /tmp/ambari-shell.sh
+}
+
+eagle-deploy-cluster() {
+  local act_cluster_size=$1
+  : ${act_cluster_size:=$CLUSTER_SIZE}
+
+  if [ $# -gt 1 ]; then
+    BLUEPRINT=$2
+  else
+    [ $act_cluster_size -gt 1 ] && BLUEPRINT=hdp-multinode-eagle || 
BLUEPRINT=hdp-singlenode-eagle
+  fi
+
+  : ${BLUEPRINT:?" required (hdp-singlenode-eagle / hdp-multinode-eagle)"}
+
+  amb-start-cluster $act_cluster_size
+  _amb_run_shell /tmp/install-cluster.sh
+}
+
+amb-start-first() {
+  run-command docker run -P -d $DOCKER_OPTS --name $AMBARI_SERVER_NAME -h 
$AMBARI_SERVER_NAME.$MYDOMAIN --privileged=true $IMAGE --tag ambari-server=true
+}
+
+amb-copy-to-hdfs() {
+  get-ambari-server-ip
+  FILE_PATH=${1:?"usage: <FILE_PATH> <NEW_FILE_NAME_ON_HDFS> <HDFS_PATH>"}
+  FILE_NAME=${2:?"usage: <FILE_PATH> <NEW_FILE_NAME_ON_HDFS> <HDFS_PATH>"}
+  DIR=${3:?"usage: <FILE_PATH> <NEW_FILE_NAME_ON_HDFS> <HDFS_PATH>"}
+  amb-create-hdfs-dir $DIR
+  DATANODE=$(curl -si -X PUT 
"http://$AMBARI_SERVER_IP:50070/webhdfs/v1$DIR/$FILE_NAME?user.name=hdfs&op=CREATE";
 |grep Location | sed "s/\..*//; s@.*http://@@";)
+  DATANODE_IP=$(get-host-ip $DATANODE)
+  curl -T $FILE_PATH 
"http://$DATANODE_IP:50075/webhdfs/v1$DIR/$FILE_NAME?op=CREATE&user.name=hdfs&overwrite=true&namenoderpcaddress=$AMBARI_SERVER_IP:8020";
+}
+
+amb-create-hdfs-dir() {
+  get-ambari-server-ip
+  DIR=$1
+  curl -X PUT 
"http://$AMBARI_SERVER_IP:50070/webhdfs/v1$DIR?user.name=hdfs&op=MKDIRS"; > 
/dev/null 2>&1
+}
+
+amb-scp-to-first() {
+  get-ambari-server-ip
+  FILE_PATH=${1:?"usage: <FILE_PATH> <DESTINATION_PATH>"}
+  DEST_PATH=${2:?"usage: <FILE_PATH> <DESTINATION_PATH>"}
+  scp $FILE_PATH root@$AMBARI_SERVER_IP:$DEST_PATH
+}
+
+amb-start-node() {
+  get-ambari-server-ip
+  : ${AMBARI_SERVER_IP:?"AMBARI_SERVER_IP is needed"}
+  NUMBER=${1:?"please give a <NUMBER> parameter it will be used as 
node<NUMBER>"}
+  if [ $# -eq 1 ] ;then
+    MORE_OPTIONS="-d"
+  else
+    shift
+    MORE_OPTIONS="$@"
+  fi
+  run-command docker run $MORE_OPTIONS -e SERF_JOIN_IP=$AMBARI_SERVER_IP 
$DOCKER_OPTS --name ${NODE_PREFIX}$NUMBER -h ${NODE_PREFIX}${NUMBER}.$MYDOMAIN 
$IMAGE --log-level debug
+}

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/eagle-multinode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-multinode.json 
b/eagle-external/eagle-docker/eagle-multinode.json
new file mode 100644
index 0000000..b1735b3
--- /dev/null
+++ b/eagle-external/eagle-docker/eagle-multinode.json
@@ -0,0 +1,163 @@
+{
+"configurations": [
+    {
+      "hdfs-site": {
+        "dfs.permissions.enabled": "false"
+      },
+      "hive-site": {
+        "javax.jdo.option.ConnectionUserName": "hive",
+        "javax.jdo.option.ConnectionPassword": "hive"
+      }
+    },
+    {
+       "hadoop-env": {
+          "properties" : {
+            "content" : "\r\n# Set Hadoop-specific environment variables 
here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others 
are\r\n# optional.  When running a distributed configuration it is best to\r\n# 
set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote 
nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport 
JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop 
home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# 
Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# 
Path to jsvc required by secure HDP 2.0 datanode\r\nexport 
JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. 
Default is 1000.\r\nexport 
HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java 
runtime options.  Empty by default.\r\nexport 
HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS 
when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT 
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps 
-XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport 
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m 
-XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m 
-Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following 
applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m 
$HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as 
after dropping privileges\r\nexport 
HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
 Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o 
ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are 
stored.  $HADOOP_HOME/logs by default.\r\nexport 
HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server 
logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# 
Where log files are stored in the secure data environment.\r\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop 
code should be rsync'd from.  Unset by default.\r\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between 
slave commands.  Unset by default.  This\r\n# can be useful in large clusters, 
where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can 
service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where 
pid files are stored. /tmp by default.\r\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 History server pid\r\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
 A string representing this instance of hadoop. $USER by default.\r\nexport 
HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon 
processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from 
standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql 
connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required 
by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 
2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# 
Add libraries required by 
nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/local/eagle/lib/log4jkafka/lib/*\r\n\r\n#
 added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; 
then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned 
RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n 
   export 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-clien
 t/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command 
line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly 
required for hadoop 2.0\r\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport 
HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
+         }
+        }
+    },
+    {
+      "hdfs-log4j": {
+        "properties": {
+          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation 
(ASF) under one\r\n# or more contributor license agreements.  See the NOTICE 
file\r\n# distributed with this work for additional information\r\n# regarding 
copyright ownership.  The ASF licenses this file\r\n# to you under the Apache 
License, Version 2.0 (the\r\n# \"License\"); you may not use this file except 
in compliance\r\n# with the License.  You may obtain a copy of the License 
at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless 
required by applicable law or agreed to in writing,\r\n# software distributed 
under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES 
OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for 
the\r\n# specific language governing permissions and limitations\r\n# under the 
License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by 
system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in 
hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
 Define the root logger to the system property 
\"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# 
Daily Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 
30-day 
backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\r\n# Debugging Pattern 
format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use 
this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default 
values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 
n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
 hdfs audit l
 
ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: 
%m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-server.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.ConversionPattern=%d{ISO8
 601} %p %c{2}: 
%m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
 mapred audit 
logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# 
Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Logfile size and and 30-day 
backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appender.RFA.MaxBackupInde
 
x=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging 
levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
 Jets3t 
library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
 Null Appender\r\n# Trap security logger on the hadoop client 
side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
 Event Counter Appender\r\n# Sends counts of logging messages at different 
severity levels to Hadoop 
Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.Ev
 entCounter\r\n\r\n# Removes \"deprecated\" 
messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
 HDFS block state change log from block manager\r\n#\r\n# Uncomment the 
following to suppress normal block state change\r\n# messages from BlockManager 
in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+         }
+      }
+    }
+  ],
+  "host_groups": [
+    {
+      "name": "master",
+      "components": [
+        {
+          "name": "APP_TIMELINE_SERVER"
+        },
+        {
+          "name": "HISTORYSERVER"
+        },
+        {
+          "name": "HBASE_REGIONSERVER"
+        },
+        {
+          "name": "HBASE_CLIENT"
+        },
+        {
+          "name": "WEBHCAT_SERVER"
+        },
+        {
+          "name": "HCAT"
+        },
+        {
+          "name": "NAMENODE"
+        },
+        {
+          "name": "AMBARI_SERVER"
+        },
+        {
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "name": "HIVE_CLIENT"
+        },
+        {
+          "name": "NODEMANAGER"
+        },
+        {
+          "name": "DATANODE"
+        },
+        {
+          "name": "RESOURCEMANAGER"
+        },
+        {
+          "name": "ZOOKEEPER_SERVER"
+        },
+        {
+          "name": "ZOOKEEPER_CLIENT"
+        },
+        {
+          "name": "HBASE_MASTER"
+        },
+        {
+          "name": "HIVE_SERVER"
+        },
+        {
+          "name": "SECONDARY_NAMENODE"
+        },
+        {
+          "name": "HIVE_METASTORE"
+        },
+        {
+          "name": "YARN_CLIENT"
+        },
+        {
+          "name": "MAPREDUCE2_CLIENT"
+        },
+        {
+          "name": "MYSQL_SERVER"
+        },
+       { 
+         "name": "GANGLIA_SERVER"
+       },
+       {
+         "name" : "DRPC_SERVER"
+       },
+       {
+         "name" : "STORM_UI_SERVER"
+       },
+       {
+          "name" : "NIMBUS"
+        },
+        {
+          "name" : "KAFKA_BROKER"
+        }
+      ],
+      "cardinality": "1"
+    },
+    {
+      "name": "slave_1",
+      "components": [
+        {
+          "name": "HBASE_CLIENT"
+        },
+        {
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "name": "HIVE_CLIENT"
+        },
+        {
+          "name": "NODEMANAGER"
+        },
+        {
+          "name": "DATANODE"
+        },
+        { 
+          "name": "ZOOKEEPER_SERVER"
+        },
+        {
+          "name": "ZOOKEEPER_CLIENT"
+        },
+        {
+          "name": "YARN_CLIENT"
+        },
+        {
+          "name": "MAPREDUCE2_CLIENT"
+        },
+       {
+         "name" : "KAFKA_BROKER"
+       },
+       { 
+         "name": "GANGLIA_MONITOR"
+       },
+        {
+          "name" : "SUPERVISOR"
+        }
+      ],
+      "cardinality": "1"
+    }
+  ],
+  "Blueprints": {
+    "blueprint_name": "hdp-multinode-eagle",
+    "stack_name": "HDP",
+    "stack_version": "2.2"
+}
+}

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/eagle-singlenode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/eagle-singlenode.json 
b/eagle-external/eagle-docker/eagle-singlenode.json
new file mode 100644
index 0000000..4c2c6e8
--- /dev/null
+++ b/eagle-external/eagle-docker/eagle-singlenode.json
@@ -0,0 +1,122 @@
+{
+"configurations": [
+    {
+      "hdfs-site": {
+        "dfs.permissions.enabled": "false"
+      },
+      "hive-site": {
+        "javax.jdo.option.ConnectionUserName": "hive",
+        "javax.jdo.option.ConnectionPassword": "hive"
+      }
+    },
+    {
+       "hadoop-env": {
+          "properties" : {
+            "content" : "\r\n# Set Hadoop-specific environment variables 
here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others 
are\r\n# optional.  When running a distributed configuration it is best to\r\n# 
set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote 
nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport 
JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop 
home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# 
Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# 
Path to jsvc required by secure HDP 2.0 datanode\r\nexport 
JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. 
Default is 1000.\r\nexport 
HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java 
runtime options.  Empty by default.\r\nexport 
HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=tr
 ue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS 
when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT 
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d
 %H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps 
-XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport 
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m 
-XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m 
-Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCE
 R_OPTS}\"\r\n\r\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following 
applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m 
$HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as 
after dropping privileges\r\nexport 
HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
 Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o 
ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are 
stored.  $HADOOP_HOME/logs by default.\r\nexport 
HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server 
logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# 
Where log files are stored in the secure data environment.\r\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# e
 xport HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop 
code should be rsync'd from.  Unset by default.\r\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between 
slave commands.  Unset by default.  This\r\n# can be useful in large clusters, 
where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can 
service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where 
pid files are stored. /tmp by default.\r\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 History server pid\r\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
 A string representing this instance of hadoop. $USER by default.\r\nexport 
HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon 
processes.  See 'm
 an nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from 
standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql 
connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required 
by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 
2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# 
Add libraries required by 
nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/local/eagle/lib/log4jkafka/lib/*\r\n\r\n#
 added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; 
then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned 
RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n 
   export 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-clien
 t/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command 
line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly 
required for hadoop 2.0\r\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport 
HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
+         }
+        }
+    },
+    {
+      "hdfs-log4j": {
+        "properties": {
+          "content" : "\r\n#\r\n# Licensed to the Apache Software Foundation 
(ASF) under one\r\n# or more contributor license agreements.  See the NOTICE 
file\r\n# distributed with this work for additional information\r\n# regarding 
copyright ownership.  The ASF licenses this file\r\n# to you under the Apache 
License, Version 2.0 (the\r\n# \"License\"); you may not use this file except 
in compliance\r\n# with the License.  You may obtain a copy of the License 
at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless 
required by applicable law or agreed to in writing,\r\n# software distributed 
under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES 
OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for 
the\r\n# specific language governing permissions and limitations\r\n# under the 
License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by 
system properties\r\n# To change daemon root logger use hadoo
 p_root_logger in 
hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
 Define the root logger to the system property 
\"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# 
Daily Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 
30-day 
backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\r\n# Debugging Pattern 
format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlog
 ger above if you want to use 
this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default 
values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\
 
n#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
 hdfs audit l
 
ogging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: 
%m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=eagle-server.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.ConversionPattern=%d{ISO8
 601} %p %c{2}: 
%m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
 mapred audit 
logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# 
Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Logfile size and and 30-day 
backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appender.RFA.MaxBackupInde
 
x=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging 
levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
 Jets3t 
library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
 Null Appender\r\n# Trap security logger on the hadoop client 
side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
 Event Counter Appender\r\n# Sends counts of logging messages at different 
severity levels to Hadoop 
Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.Ev
 entCounter\r\n\r\n# Removes \"deprecated\" 
messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
 HDFS block state change log from block manager\r\n#\r\n# Uncomment the 
following to suppress normal block state change\r\n# messages from BlockManager 
in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+         }
+      }
+    }
+  ],
+  "host_groups": [
+    {
+      "name": "master",
+      "components": [
+        {
+          "name": "APP_TIMELINE_SERVER"
+        },
+        {
+          "name": "HISTORYSERVER"
+        },
+        {
+          "name": "HBASE_REGIONSERVER"
+        },
+        {
+          "name": "WEBHCAT_SERVER"
+        },
+        {
+          "name": "HCAT"
+        },
+        {
+          "name": "HBASE_CLIENT"
+        },
+        {
+          "name": "NAMENODE"
+        },
+        {
+          "name": "AMBARI_SERVER"
+        },
+        {
+          "name": "HDFS_CLIENT"
+        },
+        {
+          "name": "HIVE_CLIENT"
+        },
+        {
+          "name": "NODEMANAGER"
+        },
+        {
+          "name": "DATANODE"
+        },
+        {
+          "name": "RESOURCEMANAGER"
+        },
+        {
+          "name": "ZOOKEEPER_SERVER"
+        },
+        {
+          "name": "ZOOKEEPER_CLIENT"
+        },
+        {
+          "name": "HBASE_MASTER"
+        },
+        {
+          "name": "HIVE_SERVER"
+        },
+        {
+          "name": "SECONDARY_NAMENODE"
+        },
+        {
+          "name": "HIVE_METASTORE"
+        },
+        {
+          "name": "YARN_CLIENT"
+        },
+        {
+          "name": "MAPREDUCE2_CLIENT"
+        },
+        {
+          "name": "MYSQL_SERVER"
+        },
+        { "name": "GANGLIA_SERVER"},
+
+       { "name": "GANGLIA_MONITOR"},
+       
+       { "name": "KAFKA_BROKER"},
+        {
+          "name" : "DRPC_SERVER"
+        },
+        {
+          "name" : "NIMBUS"
+        },
+        {
+          "name" : "STORM_UI_SERVER"
+        },
+        { "name" : "SUPERVISOR"}
+      ],
+
+      "cardinality": "1"
+    }
+  ],
+  "Blueprints": {
+    "blueprint_name": "hdp-singlenode-eagle",
+    "stack_name": "HDP",
+    "stack_version": "2.2"
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/install-cluster.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/install-cluster.sh 
b/eagle-external/eagle-docker/install-cluster.sh
new file mode 100755
index 0000000..eb730ae
--- /dev/null
+++ b/eagle-external/eagle-docker/install-cluster.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# Provision an HDP cluster through the Ambari shell using the blueprint named
+# by $BLUEPRINT, then emit a serf "eagle" event to trigger Eagle deployment.
+
+export PATH=/usr/jdk64/jdk1.7.0_67/bin:$PATH
+
+# Register both blueprints and build the cluster non-interactively.
+./ambari-shell.sh << EOF
+blueprint add --file /tmp/eagle-singlenode.json
+blueprint add --file /tmp/eagle-multinode.json
+cluster build --blueprint $BLUEPRINT
+cluster autoAssign
+cluster create --exitOnFinish true
+EOF
+
+clear
+
+# The linked ambariserver container exposes serf's RPC endpoint on port 7373;
+# strip the scheme prefix (everything up to the last '/') from the link env var.
+SERF_RPC_ADDR=${AMBARISERVER_PORT_7373_TCP##*/}
+serf event --rpc-addr="$SERF_RPC_ADDR" eagle
+
+echo "eagle environment is set up successfully"
+./wait-for-eagle.sh

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/serf/etc/ambari.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/serf/etc/ambari.json 
b/eagle-external/eagle-docker/serf/etc/ambari.json
new file mode 100644
index 0000000..4409e84
--- /dev/null
+++ b/eagle-external/eagle-docker/serf/etc/ambari.json
@@ -0,0 +1,9 @@
+{
+  "event_handlers": [
+    "member-join=/usr/local/serf/handlers/ambari-bootstrap",
+    "user:eagle=/usr/local/serf/handlers/eagle"
+  ],
+  "tags" : {
+    "ambari-agent": "true"
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/serf/handlers/eagle
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/serf/handlers/eagle 
b/eagle-external/eagle-docker/serf/handlers/eagle
new file mode 100755
index 0000000..ce07c67
--- /dev/null
+++ b/eagle-external/eagle-docker/serf/handlers/eagle
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# install Eagle on the Ambari server instance
+if [[ "$SERF_TAG_AMBARI_SERVER" == "true" ]] ;then
+  echo run eagle install script
+  nohup /usr/local/eagle/deploy.sh > /var/log/eagle-deploy.log
+fi

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/db5d61a6/eagle-external/eagle-docker/wait-for-eagle.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/wait-for-eagle.sh 
b/eagle-external/eagle-docker/wait-for-eagle.sh
new file mode 100755
index 0000000..39909a3
--- /dev/null
+++ b/eagle-external/eagle-docker/wait-for-eagle.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Block until the Eagle web service answers HTTP 200, polling every $SLEEP
+# seconds. EAGLE_HOST defaults to the linked ambariserver container's
+# exposed address on port 9099 (docker link env var).
+
+: ${EAGLE_HOST:=$AMBARISERVER_PORT_9099_TCP_ADDR}
+: ${SLEEP:=2}
+: ${DEBUG:=1}
+
+: ${EAGLE_HOST:? eagle server address is mandatory, fallback is a linked container's exposed 9099}
+
+debug() {
+  [ $DEBUG -gt 0 ] && echo [DEBUG] "$@" 1>&2
+}
+
+# Print only the HTTP status code of the Eagle index page. Use $EAGLE_HOST
+# rather than the hardcoded link address so an explicit override is honored.
+get-server-state() {
+  curl -s -o /dev/null -w "%{http_code}" $EAGLE_HOST:9099/eagle-service/index.html
+}
+
+debug waits for eagle to start on: $EAGLE_HOST
+while ! get-server-state | grep -q 200 ; do
+  [ $DEBUG -gt 0 ] && echo -n .
+  sleep $SLEEP
+done
+[ $DEBUG -gt 0 ] && echo
+debug eagle web started: $EAGLE_HOST:9099/eagle

Reply via email to