Repository: eagle
Updated Branches:
  refs/heads/master aa136afa9 -> 39ab83e62


Docker updates for eagle 0.5

This Docker image is for Eagle 0.5.

There is still an issue where the NameNode process sometimes dies.
Creating this PR to get the review started.
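
For reference, a minimal sketch of building and launching the sandbox with the
helper script touched by this patch; the subcommand names below are assumptions
based on the build/deploy functions in bin/eagle-docker.sh and may not match
the actual CLI exactly:

    # Build the Eagle binary package (mvn package -DskipTests) and the
    # apacheeagle/sandbox Docker image
    ./eagle-external/eagle-docker/bin/eagle-docker.sh build

    # Start a single-node sandbox cluster (NODE_NUM defaults to 1)
    ./eagle-external/eagle-docker/bin/eagle-docker.sh deploy

    # Once the cluster is up, the Eagle server should be reachable on port 9090
    # (per the final message in resource/deploy-eagle.sh)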

Author: Jay <jhsonl...@gmail.com>

Closes #914 from jhsenjaliya/EAGLE-925.


Project: http://git-wip-us.apache.org/repos/asf/eagle/repo
Commit: http://git-wip-us.apache.org/repos/asf/eagle/commit/39ab83e6
Tree: http://git-wip-us.apache.org/repos/asf/eagle/tree/39ab83e6
Diff: http://git-wip-us.apache.org/repos/asf/eagle/diff/39ab83e6

Branch: refs/heads/master
Commit: 39ab83e625a271a2f3d55a9395c60a43c61f09da
Parents: aa136af
Author: Jay <jhsonl...@gmail.com>
Authored: Wed Jun 28 15:05:58 2017 -0700
Committer: Jay <jhsonl...@gmail.com>
Committed: Wed Jun 28 15:05:58 2017 -0700

----------------------------------------------------------------------
 .../src/main/initdata/eagle-topology-init.sh    |  2 +-
 .../src/main/initdata/sample-policy-create.sh   |  2 +-
 eagle-external/eagle-docker/Dockerfile          | 18 +++++--
 eagle-external/eagle-docker/README.md           |  4 +-
 eagle-external/eagle-docker/bin/eagle-docker.sh | 57 +++++++++++---------
 eagle-external/eagle-docker/bin/eagle-lib.sh    | 12 ++---
 .../eagle-docker/resource/deploy-eagle.sh       | 46 ++++++----------
 .../eagle-docker/resource/eagle-multinode.json  |  4 +-
 .../eagle-docker/resource/eagle-singlenode.json | 27 +++++++---
 .../eagle-docker/resource/install-cluster.sh    |  8 +--
 .../resource/serf/bin/start-serf-agent.sh       | 49 +++++++++++++++++
 .../eagle-docker/resource/wait-for-eagle.sh     | 14 +++--
 .../src/main/resources/application.conf         |  3 ++
 13 files changed, 156 insertions(+), 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh
----------------------------------------------------------------------
diff --git 
a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh
 
b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh
index 62b1e83..d3e553a 100755
--- 
a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh
+++ 
b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh
@@ -18,7 +18,7 @@
 export EAGLE_SERVICE_USER=admin
 export EAGLE_SERVICE_PASSWD=secret
 export EAGLE_SERVICE_HOST=localhost
-export EAGLE_SERVICE_PORT=38080
+export EAGLE_SERVICE_PORT=9090
 
 #####################################################################
 #            Import stream metadata for HDFS

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh
----------------------------------------------------------------------
diff --git 
a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh
 
b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh
index 307c542..6e66dc7 100755
--- 
a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh
+++ 
b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh
@@ -22,7 +22,7 @@ export EAGLE_SERVICE_PORT=38080
 
 #### create hdfs policy sample in sandbox
 echo "create hdfs policy sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 
'Content-Type:application/json' 
"http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDefinitionService"
 -d \
+curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 
'Content-Type:application/json' 
"http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/rest/entities?serviceName=AlertDefinitionService"
 -d \
 
'[{"tags":{"site":"sandbox","dataSource":"hdfsAuditLog","alertExecutorId":"hdfsAuditLogAlertExecutor","policyId":"viewPrivate","policyType":"siddhiCEPEngine"},"desc":"view
 private file","policyDef":"{\"type\":\"siddhiCEPEngine\",\"expression\":\"from 
hdfsAuditLogEventStream[(cmd=='\'open\'') and (src=='\'/tmp/private\'')] select 
* insert into outputStream\"}","dedupeDef": 
"{\"alertDedupIntervalMin\":0,\"emailDedupIntervalMin\":1440}","notificationDef":
 "[{\"subject\":\"just for 
test\",\"sender\":\"nob...@test.com\",\"recipients\":\"nob...@test.com\",\"flavor\":\"email\",\"id\":\"email_1\",\"tplFileName\":\"\"}]","remediationDef":"","enabled":true}]'
 
 #### create hive policy sample in sandbox

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/Dockerfile
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/Dockerfile 
b/eagle-external/eagle-docker/Dockerfile
index 4aa3dec..de06bbc 100644
--- a/eagle-external/eagle-docker/Dockerfile
+++ b/eagle-external/eagle-docker/Dockerfile
@@ -15,11 +15,17 @@
 
 # NOTICE: This docker image is built based sequenceiq/ambari which is open 
sourced under github: https://github.com/sequenceiq/docker-ambari
 FROM sequenceiq/ambari:1.7.0
-
-MAINTAINER d...@eagle.incubator.apache.org
+MAINTAINER d...@eagle.apache.org
 
 # Eagle environment
 ENV EAGLE_HOME=/usr/hdp/current/eagle
+ENV JDK_VERSION 8u121
+ENV JDK_BUILD_VERSION b13
+# for local development
+#ADD resource/jdk-8u121-linux-x64.rpm /tmp/
+#RUN rpm -i /tmp/jdk-8u121-linux-x64.rpm
+RUN curl -LO 
"http://download.oracle.com/otn-pub/java/jdk/8u121-b13/e9e7ea248e2c4826b92b3f075a80e441/jdk-8u121-linux-x64.rpm"
 -H 'Cookie: oraclelicense=accept-securebackup-cookie' && rpm -i 
jdk-$JDK_VERSION-linux-x64.rpm;
+ENV JAVA_HOME /usr/java/default
 
 # Install Eagle Package
 #
@@ -33,15 +39,19 @@ ENV EAGLE_HOME=/usr/hdp/current/eagle
 # `docker build` directly, to prepare the eagle package into build/eagle
 ADD target/eagle-current /usr/hdp/current/eagle
 
+
 # Load External Packages
 RUN yum install -y kafka zookeeper storm hbase tez hadoop snappy snappy-devel 
hadoop-libhdfs ambari-log4j hive hive-hcatalog hive-webhcat webhcat-tar-hive 
webhcat-tar-pig mysql-connector-java mysql-server
 
-# Load Static Resources
+ENV SERF_HOME /usr/local/serf
+
 ADD resource/serf /usr/local/serf
+
+# Load Static Resources
 ADD resource/install-cluster.sh /tmp/
 ADD resource/eagle-singlenode.json /tmp/
 ADD resource/eagle-multinode.json /tmp/
 ADD resource/wait-for-eagle.sh /tmp/
 ADD resource/deploy-eagle.sh /usr/hdp/current/eagle/deploy.sh
 
-EXPOSE 9099 8744 8080 2181 2888 6667 60020 60030 60010
+EXPOSE 8744 8080 2181 2888 6667 60020 60030 60010 9090

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/README.md
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/README.md 
b/eagle-external/eagle-docker/README.md
index 105fee0..37253c7 100644
--- a/eagle-external/eagle-docker/README.md
+++ b/eagle-external/eagle-docker/README.md
@@ -43,8 +43,8 @@ The fastest way to get started with Eagle is to run with 
[docker](https://github
 
       docker run -p 9099:9099 -p 8080:8080 -p 8744:8744 -p 2181:2181 -p 
2888:2888 -p 6667:6667 -p 60020:60020 \
         -p 60030:60030 -p 60010:60010 -d --dns 127.0.0.1 --entrypoint 
/usr/local/serf/bin/start-serf-agent.sh \
-        -e KEYCHAIN= --env 
EAGLE_SERVER_HOST=sandbox.eagle.incubator.apache.org --name sandbox \
-        -h sandbox.eagle.incubator.apache.org --privileged=true 
apacheeagle/sandbox:latest \
+        -e KEYCHAIN= --env EAGLE_SERVER_HOST=server.eagle.incubator.apache.org 
--name sandbox \
+        -h server.eagle.incubator.apache.org --privileged=true 
apacheeagle/sandbox:latest \
         --tag ambari-server=true
       docker run -it --rm -e EXPECTED_HOST_COUNT=1 -e 
BLUEPRINT=hdp-singlenode-eagle --link sandbox:ambariserver\
         --entrypoint /bin/sh apacheeagle/sandbox:latest -c 
/tmp/install-cluster.sh

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/bin/eagle-docker.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/bin/eagle-docker.sh 
b/eagle-external/eagle-docker/bin/eagle-docker.sh
index d490d21..ea05c49 100755
--- a/eagle-external/eagle-docker/bin/eagle-docker.sh
+++ b/eagle-external/eagle-docker/bin/eagle-docker.sh
@@ -18,13 +18,13 @@
 # NOTICE: This script is developed and maintained by Apache Eagle community 
under Apache Softwarw Foundation but not from official Docker product or 
community.
 
 EAGLE_DOCKER_VERSION=latest
-EAGLE_DOCKER_NAME=apacheeagle/sandbox
 EAGLE_DOCKER_PREFIX=sandbox
+EAGLE_DOCKER_NAME=apacheeagle/$EAGLE_DOCKER_PREFIX
 
 export NODE_NUM=1
 
 cd `dirname $0`/../
-source bin/eagle-lib.sh        
+source bin/eagle-lib.sh
 
 function check_env(){
        which docker 1>/dev/null 2>&1
@@ -66,56 +66,57 @@ function usage() {
 
 function build(){
        check_env
+
+       # bringing it to a parent level
+       DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+       cd $DIR/../../../
+
        # ==========================================
        # Check Eagle Docker Image
        # ==========================================
-       echo "[1/3] Validate Environment"
-       # echo "Checking eagle docker image ..."
+       echo "[1/3] Validating Environment"
        docker images | grep $EAGLE_DOCKER_NAME 1>/dev/null 2>&1
        if [ $? == 0 ];then
                echo "Eagle docker image already exists:"
                echo ""
                docker images | grep $EAGLE_DOCKER_NAME
                echo ""
-               echo "Rebuild it now [Y/N]? "
+               echo "do you want to re-build it [Y/N]? "
                read rebuild
                if [ $rebuild == "Y" ] || [ $rebuild == "y" ];then
                        echo  "Clean eagle image"
                        clean
                else
-                       echo "Quit now"
+                       echo "exiting now, since eagle docker image exists 
already"
                        exit 1
                fi
        else
-               echo "No existing eagle docker images, environment is ready"
+               echo "Building a new eagle docker image, environment is ready"
        fi      
 
        # ==========================================
        # Build Eagle Binary Package 
        # ==========================================
-       echo "[2/3] Build Eagle Binary Package"
-       # echo "Checking eagle binary package"
-       cd ../../
+       echo "[2/3] Building Eagle Binary Package"
        ls eagle-assembly/target/eagle-*-bin.tar.gz 1>/dev/null 2>&1
        if [ $? == 0 ];then
-               eagle_bin_pkg=`ls eagle-assembly/target/eagle-*-bin.tar.gz`
-               echo "Found eagle binary package at $eagle_bin_pkg"     
+               eagle_pkg=`ls eagle-assembly/target/eagle-*-bin.tar.gz`
+               echo "Found eagle binary package at $eagle_pkg"
        else
-               echo "Eagle binary package is not found"
-               echo "Build eagle binary package now"
-               # ==========================================
+           # ==========================================
                # Build Eagle Binary Package with Maven
                # ==========================================
+               echo "Packaging eagle using CMD: mvn package -DskipTests"
                echo ""
-               echo "Execute: mvn package -DskipTests "
                mvn package -DskipTests
                if [ $? == 0 ];then
-                       echo "Built successfully existing with 0"       
-                       ls eagle-assembly/target/eagle-*-bin.tar.gz 1>/dev/null 
2>&1
+                       echo "Eagle built successfully."
+                       eagle_pkg_file=$( (basename 
eagle-assembly/target/eagle-*-bin.tar.gz) 2>/dev/null )
                        if [ $? == 0 ];then
-                               eagle_bin_pkg=`ls 
eagle-assembly/target/eagle-*-bin.tar.gz`
+                           # getting the path via the ls command may not work 
in all environments
+                               
eagle_pkg="eagle-assembly/target/$eagle_pkg_file"
                                echo ""
-                               echo "[SUCCESS] Successfully build eagle binary 
package at $eagle_bin_pkg"      
+                               echo "[SUCCESS] Successfully build eagle binary 
package at $eagle_pkg"
                        else
                                echo ""
                                echo "[FAILED] Built eagle binary package 
exiting with 0, but package is not found"
@@ -131,22 +132,24 @@ function build(){
        # =====================================
        # Build Eagle Docker Image
        # =====================================
-       echo "[3/3] Build Eagle Docker Image: $EAGLE_DOCKER_NAME"
-       echo "Extracting $eagle_bin_pkg" 
+       echo "[3/3] Building Eagle Docker Image: $EAGLE_DOCKER_NAME"
+       echo "Extracting $eagle_pkg"
        if [ -e eagle-external/eagle-docker/target ];then
                rm -rf eagle-external/eagle-docker/target
        fi
        mkdir -p eagle-external/eagle-docker/target/eagle-current
 
-       out=`tar -xzf $eagle_bin_pkg -C eagle-external/eagle-docker/target/`
+       out=`tar -xzf $eagle_pkg -C eagle-external/eagle-docker/target/`
        if [ $? != 0 ];then
-               echo "[ERROR] Failed to execute 'tar -xzf $eagle_bin_pkg -C 
eagle-external/eagle-docker/target/': $out" 1>&2 
+               echo "[ERROR] Failed to execute 'tar -xzf $eagle_pkg -C 
eagle-external/eagle-docker/target/': $out" 1>&2
                exit 1
        fi
        mv eagle-external/eagle-docker/target/eagle-*/* 
eagle-external/eagle-docker/target/eagle-current
+       mkdir eagle-external/eagle-docker/target/eagle-current/lib/common
+       cp eagle-external/eagle-log4jkafka/target/eagle-log4jkafka-*.jar 
eagle-external/eagle-docker/target/eagle-current/lib/common/
 
-       echo "Execute: docker build -t $EAGLE_DOCKER_NAME ."
-        cd eagle-external/eagle-docker
+       echo "Executing: docker build -t $EAGLE_DOCKER_NAME ."
+    cd eagle-external/eagle-docker
        docker build -t $EAGLE_DOCKER_NAME .
        
        if [ $? == 0 ];then
@@ -230,6 +233,8 @@ function clean(){
 
 function deploy(){
        check_env
+       # TODO: should check that all required ports are not already bound,
+       # so that it does not fail after creating the first container.
        if [ "$NODE_NUM" == "" ];then
                export NODE_NUM=1
        fi 

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/bin/eagle-lib.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/bin/eagle-lib.sh 
b/eagle-external/eagle-docker/bin/eagle-lib.sh
old mode 100755
new mode 100644
index c988866..7cbdd3c
--- a/eagle-external/eagle-docker/bin/eagle-lib.sh
+++ b/eagle-external/eagle-docker/bin/eagle-lib.sh
@@ -18,10 +18,10 @@
 : ${VERSION:=latest}
 : ${IMAGE:="apacheeagle/sandbox:${VERSION}"}
 
-: ${NODE_PREFIX:=sandbox}
+: ${NODE_PREFIX:=server}
 : ${AMBARI_SERVER_NAME:=${NODE_PREFIX}}
-: ${MYDOMAIN:=eagle.incubator.apache.org}
-: ${DOCKER_OPTS:="--dns 127.0.0.1 --entrypoint 
/usr/local/serf/bin/start-serf-agent.sh -e KEYCHAIN=$KEYCHAIN --env 
EAGLE_SERVER_HOST=${AMBARI_SERVER_NAME}.${MYDOMAIN}"}
+: ${MYDOMAIN:=eagle.apache.org}
+: ${DOCKER_OPTS:="--dns 127.0.0.1 --entrypoint 
/usr/local/serf/bin/start-serf-agent.sh -e KEYCHAIN=$KEYCHAIN --env 
EAGLE_SERVER_HOST=${AMBARI_SERVER_NAME}.${MYDOMAIN} --env 
SERF_TAG_AMBARI_SERVER=true"}
 : ${CLUSTER_SIZE:=1}
 : ${DEBUG:=1}
 : ${SLEEP_TIME:=2}
@@ -98,7 +98,7 @@ _amb_run_shell() {
   : ${COMMAND:? required}
   get-ambari-server-ip
   NODES=$(docker inspect --format="{{.Config.Image}} {{.Name}}" $(docker ps 
-q)|grep $IMAGE|grep $NODE_PREFIX|wc -l|xargs)
-  run-command docker run -it --rm -e EXPECTED_HOST_COUNT=$NODES -e 
BLUEPRINT=$BLUEPRINT --link ${AMBARI_SERVER_NAME}:ambariserver --entrypoint 
/bin/sh $IMAGE -c $COMMAND
+  run-command docker run -it --rm -e EXPECTED_HOST_COUNT=$NODES -e 
BLUEPRINT=$BLUEPRINT --env EAGLE_SERVER_HOST=${AMBARI_SERVER_NAME}.${MYDOMAIN} 
--link ${AMBARI_SERVER_NAME}:ambariserver --entrypoint /bin/sh $IMAGE -c 
$COMMAND
 }
 
 amb-shell() {
@@ -122,7 +122,7 @@ eagle-deploy-cluster() {
 }
 
 amb-start-first() {
-  run-command docker run -p 9099:9099 -p 8080:8080 -p 8744:8744 -p 2181:2181 
-p 2888:2888 -p 6667:6667 -p 60020:60020 -p 60030:60030 -p 60010:60010 -d 
$DOCKER_OPTS --name $AMBARI_SERVER_NAME -h $AMBARI_SERVER_NAME.$MYDOMAIN 
--privileged=true $IMAGE --tag ambari-server=true
+  run-command docker run -m 5g --cpus 0.000 --memory-swap -1 -p 9090:9090 -p 
8080:8080 -p 8744:8744 -p 2181:2181 -p 2888:2888 -p 6667:6667 -p 60020:60020 -p 
60030:60030 -p 60010:60010 -p 50070:50070 -d $DOCKER_OPTS --name 
$AMBARI_SERVER_NAME -h $AMBARI_SERVER_NAME.$MYDOMAIN --privileged=true $IMAGE 
--tag ambari-server=true
 }
 
 amb-copy-to-hdfs() {
@@ -160,4 +160,4 @@ amb-start-node() {
     MORE_OPTIONS="$@"
   fi
   run-command docker run $MORE_OPTIONS -e SERF_JOIN_IP=$AMBARI_SERVER_IP 
$DOCKER_OPTS --name ${NODE_PREFIX}_$NUMBER -h 
${NODE_PREFIX}_${NUMBER}.$MYDOMAIN $IMAGE --log-level debug
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/resource/deploy-eagle.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/deploy-eagle.sh 
b/eagle-external/eagle-docker/resource/deploy-eagle.sh
index a1e5a19..c0de256 100755
--- a/eagle-external/eagle-docker/resource/deploy-eagle.sh
+++ b/eagle-external/eagle-docker/resource/deploy-eagle.sh
@@ -35,51 +35,39 @@ echo ""
 echo "Eagle home folder path is $EAGLE_HOME"
 cd $EAGLE_HOME
 
-
+# HBase tables will be auto-created now
 #Initializing Eagle Service ...
-sh ./bin/eagle-service-init.sh
-
-sleep 10
+#sh ./bin/eagle-service-init.sh
+#
+#sleep 10
 
 #Starting Eagle Service ...
-sh ./bin/eagle-service.sh start
+sh ./bin/eagle-server.sh start
 
 sleep 10
 
 echo "Creating kafka topics for eagle ... "
 KAFKA_HOME=/usr/hdp/current/kafka-broker
 EAGLE_ZOOKEEPER_QUORUM=$EAGLE_SERVER_HOST:2181
-topic=`${KAFKA_HOME}/bin/kafka-topics.sh --list --zookeeper 
$EAGLE_ZOOKEEPER_QUORUM --topic sandbox_hdfs_audit_log`
+KAFKA_TOPIC_NAME=sandbox_hdfs_audit_log
+topic=`${KAFKA_HOME}/bin/kafka-topics.sh --list --zookeeper 
$EAGLE_ZOOKEEPER_QUORUM --topic $KAFKA_TOPIC_NAME`
 if [ -z $topic ]; then
-        $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper 
$EAGLE_ZOOKEEPER_QUORUM --replication-factor 1 --partitions 1 --topic 
sandbox_hdfs_audit_log
-fi
-
-if [ $? = 0 ]; then
-echo "==> Create kafka topic successfully for eagle"
+        $KAFKA_HOME/bin/kafka-topics.sh --create --zookeeper 
$EAGLE_ZOOKEEPER_QUORUM --replication-factor 1 --partitions 1 --topic 
$KAFKA_TOPIC_NAME
+        if [ $? = 0 ]; then
+            echo "==> Created kafka topic: $KAFKA_TOPIC_NAME successfully for 
eagle"
+        else
+            echo "==> Failed to create required topic: $KAFKA_TOPIC_NAME, 
exiting"
+            exit 1
+        fi
 else
-echo "==> Failed, exiting"
-exit 1
+    echo "==> Kafka topic: $KAFKA_TOPIC_NAME already exists for eagle"
 fi
 
 EAGLE_NIMBUS_HOST=$EAGLE_SERVER_HOST
 EAGLE_SERVICE_HOST=$EAGLE_SERVER_HOST
-EAGLE_TOPOLOGY_JAR=`ls 
${EAGLE_HOME}/lib/topology/eagle-topology-*-assembly.jar`
-
-${EAGLE_HOME}/bin/eagle-topology-init.sh
-[ $? != 0 ] && exit 1
-${EAGLE_HOME}/examples/sample-sensitivity-resource-create.sh
-[ $? != 0 ] && exit 1
-${EAGLE_HOME}/examples/sample-policy-create.sh
-[ $? != 0 ] && exit 1
-storm jar $EAGLE_TOPOLOGY_JAR 
org.apache.eagle.security.auditlog.HdfsAuditLogProcessorMain -D 
config.file=${EAGLE_HOME}/conf/sandbox-hdfsAuditLog-application.conf  -D 
eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
-[ $? != 0 ] && exit 1
-storm jar $EAGLE_TOPOLOGY_JAR 
org.apache.eagle.security.hive.jobrunning.HiveJobRunningMonitoringMain -D 
config.file=${EAGLE_HOME}/conf/sandbox-hiveQueryLog-application.conf  -D 
eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
-[ $? != 0 ] && exit 1
-storm jar $EAGLE_TOPOLOGY_JAR 
org.apache.eagle.security.userprofile.UserProfileDetectionMain -D 
config.file=${EAGLE_HOME}/conf/sandbox-userprofile-topology.conf  -D 
eagleProps.eagleService.host=$EAGLE_SERVICE_HOST
-[ $? != 0 ] && exit 1
+EAGLE_TOPOLOGY_JAR=`ls ${EAGLE_HOME}/lib/eagle-topology-*-assembly.jar`
 
 # TODO: More eagle start
 
 echo "Eagle is deployed successfully!"
-
-echo "Please visit http://<container_ip>:9099/eagle-service to play with 
Eagle!"
+echo "Please visit http://$EAGLE_SERVER_HOST:9090 to play with Eagle!"

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/resource/eagle-multinode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/eagle-multinode.json 
b/eagle-external/eagle-docker/resource/eagle-multinode.json
index 4c1071a..dfa5145 100644
--- a/eagle-external/eagle-docker/resource/eagle-multinode.json
+++ b/eagle-external/eagle-docker/resource/eagle-multinode.json
@@ -12,14 +12,14 @@
     {
       "hadoop-env": {
         "properties": {
-          "content": "\r\n# Set Hadoop-specific environment variables 
here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others 
are\r\n# optional.  When running a distributed configuration it is best to\r\n# 
set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote 
nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport 
JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop 
home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# 
Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# 
Path to jsvc required by secure HDP 2.0 datanode\r\nexport 
JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. 
Default is 1000.\r\nexport 
HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java 
runtime options.  Empty by default.\r\nexport 
HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true 
 ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS 
when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT 
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%
 M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps 
-XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport 
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m 
-XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m 
-Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_O
 PTS}\"\r\n\r\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following 
applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m 
$HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as 
after dropping privileges\r\nexport 
HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
 Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o 
ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are 
stored.  $HADOOP_HOME/logs by default.\r\nexport 
HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server 
logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# 
Where log files are stored in the secure data environment.\r\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# expo
 rt HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop 
code should be rsync'd from.  Unset by default.\r\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between 
slave commands.  Unset by default.  This\r\n# can be useful in large clusters, 
where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can 
service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where 
pid files are stored. /tmp by default.\r\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 History server pid\r\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
 A string representing this instance of hadoop. $USER by default.\r\nexport 
HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon 
processes.  See 'man 
 nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard 
classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql 
connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required 
by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 
2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# 
Add libraries required by 
nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n#
 added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; 
then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned 
RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n 
   export 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-cl
 ient/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command 
line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly 
required for hadoop 2.0\r\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport 
HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
+          "content": "\r\n# Set Hadoop-specific environment variables 
here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others 
are\r\n# optional.  When running a distributed configuration it is best to\r\n# 
set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote 
nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport 
JAVA_HOME=/usr/java/default\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# 
Hadoop home directory\r\nexport 
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# Hadoop Configuration 
Directory\r\n\r\n{# this is different for HDP1 #}\r\n# Path to jsvc required by 
secure HDP 2.0 datanode\r\nexport JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The 
maximum amount of heap to use, in MB. Default is 1000.\r\nexport 
HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java 
runtime options.  Empty by default.\r\nexport 
HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=t
 rue ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS 
when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT 
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%
 d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps 
-XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport 
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m 
-XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m 
-Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANC
 ER_OPTS}\"\r\n\r\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following 
applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m 
$HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as 
after dropping privileges\r\nexport 
HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
 Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o 
ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are 
stored.  $HADOOP_HOME/logs by default.\r\nexport 
HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server 
logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# 
Where log files are stored in the secure data environment.\r\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# 
 export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where 
hadoop code should be rsync'd from.  Unset by default.\r\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between 
slave commands.  Unset by default.  This\r\n# can be useful in large clusters, 
where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can 
service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where 
pid files are stored. /tmp by default.\r\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 History server pid\r\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
 A string representing this instance of hadoop. $USER by default.\r\nexport 
HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon 
processes.  See '
 man nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from 
standard classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql 
connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required 
by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 
2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# 
Add libraries required by 
nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/common/*:/usr/hdp/current/kafka-broker/libs/*\r\n\r\n#
 added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; 
then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned 
RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n 
   export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez
 -client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\r\n  
fi\r\nfi\r\n\r\n# Setting path to hdfs command line\r\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly required for hadoop 
2.0\r\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport 
HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
         }
       }
     },
     {
       "hdfs-log4j": {
         "properties": {
-          "content": "\r\n#\r\n# Licensed to the Apache Software Foundation 
(ASF) under one\r\n# or more contributor license agreements.  See the NOTICE 
file\r\n# distributed with this work for additional information\r\n# regarding 
copyright ownership.  The ASF licenses this file\r\n# to you under the Apache 
License, Version 2.0 (the\r\n# \"License\"); you may not use this file except 
in compliance\r\n# with the License.  You may obtain a copy of the License 
at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless 
required by applicable law or agreed to in writing,\r\n# software distributed 
under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES 
OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for 
the\r\n# specific language governing permissions and limitations\r\n# under the 
License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by 
system properties\r\n# To change daemon root logger use hadoop
 _root_logger in 
hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
 Define the root logger to the system property 
\"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# 
Daily Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 
30-day 
backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\r\n# Debugging Pattern 
format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlogg
 er above if you want to use 
this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default 
values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\n
 
#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
 hdfs audit lo
 
gging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: 
%m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=sandbox.eagle.incubator.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.
 Layout.ConversionPattern=%d{ISO8601} %p %c{2}: 
%m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
 mapred audit 
logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# 
Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\n
 
log4j.appender.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging 
levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
 Jets3t 
library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
 Null Appender\r\n# Trap security logger on the hadoop client 
side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
 Event Counter Appender\r\n# Sends counts of logging messages at different 
severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=
 org.apache.hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" 
messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
 HDFS block state change log from block manager\r\n#\r\n# Uncomment the 
following to suppress normal block state change\r\n# messages from BlockManager 
in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+          "content": "\r\n#\r\n# Licensed to the Apache Software Foundation 
(ASF) under one\r\n# or more contributor license agreements.  See the NOTICE 
file\r\n# distributed with this work for additional information\r\n# regarding 
copyright ownership.  The ASF licenses this file\r\n# to you under the Apache 
License, Version 2.0 (the\r\n# \"License\"); you may not use this file except 
in compliance\r\n# with the License.  You may obtain a copy of the License 
at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless 
required by applicable law or agreed to in writing,\r\n# software distributed 
under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES 
OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for 
the\r\n# specific language governing permissions and limitations\r\n# under the 
License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by 
system properties\r\n# To change daemon root logger use hadoop
 _root_logger in 
hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
 Define the root logger to the system property 
\"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# 
Daily Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 
30-day 
backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\r\n# Debugging Pattern 
format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlogg
 er above if you want to use 
this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default 
values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\n
 
#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
 hdfs audit lo
 
gging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: 
%m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=server.eagle.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.Conv
 ersionPattern=%d{ISO8601} %p %c{2}: 
%m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
 mapred audit 
logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# 
Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Logfile size and and 30-day 
backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\nlog4j.appen
 
der.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging 
levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
 Jets3t 
library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
 Null Appender\r\n# Trap security logger on the hadoop client 
side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
 Event Counter Appender\r\n# Sends counts of logging messages at different 
severity levels to Hadoop 
Metrics.\r\n#\r\nlog4j.appender.EventCounter=org.apache.
 hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" 
messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
 HDFS block state change log from block manager\r\n#\r\n# Uncomment the 
following to suppress normal block state change\r\n# messages from BlockManager 
in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/resource/eagle-singlenode.json
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/eagle-singlenode.json 
b/eagle-external/eagle-docker/resource/eagle-singlenode.json
index d070689..ac822fd 100644
--- a/eagle-external/eagle-docker/resource/eagle-singlenode.json
+++ b/eagle-external/eagle-docker/resource/eagle-singlenode.json
@@ -10,16 +10,26 @@
       }
     },
     {
+      "hbase-site": {
+        "properties" : {
+          "zookeeper.znode.parent" : "/hbase-unsecure",
+          "hbase.zookeeper.property.clientPort" : "2181",
+          "hbase.rootdir" : "hdfs://server.eagle.apache.org:8020/hbase",
+          "hbase.cluster.distributed" : "true"
+        }
+      }
+    },
+    {
       "hadoop-env": {
         "properties": {
-          "content": "\r\n# Set Hadoop-specific environment variables 
here.\r\n\r\n# The only required environment variable is JAVA_HOME.  All others 
are\r\n# optional.  When running a distributed configuration it is best to\r\n# 
set JAVA_HOME in this file, so that it is correctly defined on\r\n# remote 
nodes.\r\n\r\n# The java implementation to use.  Required.\r\nexport 
JAVA_HOME={{java_home}}\r\nexport HADOOP_HOME_WARN_SUPPRESS=1\r\n\r\n# Hadoop 
home directory\r\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\r\n\r\n# 
Hadoop Configuration Directory\r\n\r\n{# this is different for HDP1 #}\r\n# 
Path to jsvc required by secure HDP 2.0 datanode\r\nexport 
JSVC_HOME={{jsvc_path}}\r\n\r\n\r\n# The maximum amount of heap to use, in MB. 
Default is 1000.\r\nexport 
HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\r\n\r\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\r\n\r\n# Extra Java 
runtime options.  Empty by default.\r\nexport 
HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true 
 ${HADOOP_OPTS}\"\r\n\r\n# Command specific options appended to HADOOP_OPTS 
when specified\r\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 
-XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT 
${HADOOP_NAMENODE_OPTS}\"\r\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%
 M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps 
-XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\r\n\r\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\r\nexport 
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m 
-XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m 
-Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_DATANODE_OPTS}\"\r\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_O
 PTS}\"\r\n\r\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\r\n\r\n# The following 
applies to multiple commands (fs, dfs, fsck, distcp etc)\r\nexport 
HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m 
$HADOOP_CLIENT_OPTS\"\r\n\r\n# On secure datanodes, user to run the datanode as 
after dropping privileges\r\nexport 
HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\r\n\r\n#
 Extra ssh options.  Empty by default.\r\nexport HADOOP_SSH_OPTS=\"-o 
ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\r\n\r\n# Where log files are 
stored.  $HADOOP_HOME/logs by default.\r\nexport 
HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\r\n\r\n# History server 
logs\r\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\r\n\r\n# 
Where log files are stored in the secure data environment.\r\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\r\n# expo
 rt HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\r\n\r\n# host:path where hadoop 
code should be rsync'd from.  Unset by default.\r\n# export 
HADOOP_MASTER=master:/home/$USER/src/hadoop\r\n\r\n# Seconds to sleep between 
slave commands.  Unset by default.  This\r\n# can be useful in large clusters, 
where, e.g., slave rsyncs can\r\n# otherwise arrive faster than the master can 
service them.\r\n# export HADOOP_SLAVE_SLEEP=0.1\r\n\r\n# The directory where 
pid files are stored. /tmp by default.\r\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\r\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\r\n\r\n#
 History server pid\r\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\r\n\r\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\r\n\r\n#
 A string representing this instance of hadoop. $USER by default.\r\nexport 
HADOOP_IDENT_STRING=$USER\r\n\r\n# The scheduling priority for daemon 
processes.  See 'man 
 nice'.\r\n\r\n# export HADOOP_NICENESS=10\r\n\r\n# Use libraries from standard 
classpath\r\nJAVA_JDBC_LIBS=\"\"\r\n#Add libraries required by mysql 
connector\r\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\r\ndo\r\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# Add libraries required 
by oracle connector\r\nfor jarFile in `ls /usr/share/java/*ojdbc* 
2>/dev/null`\r\ndo\r\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\r\ndone\r\n# 
Add libraries required by 
nodemanager\r\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\r\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/log4jkafka/lib/*\r\n\r\n#
 added to the HADOOP_CLASSPATH\r\nif [ -d \"/usr/hdp/current/tez-client\" ]; 
then\r\n  if [ -d \"/etc/tez/conf/\" ]; then\r\n    # When using versioned 
RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\r\n 
   export 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-cl
 ient/lib/*:/etc/tez/conf/\r\n  fi\r\nfi\r\n\r\n# Setting path to hdfs command 
line\r\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\r\n\r\n# Mostly 
required for hadoop 2.0\r\nexport 
JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\r\n\r\nexport 
HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\r\n"
+          "content": "\n# Set Hadoop-specific environment variables here.\n\n# 
The only required environment variable is JAVA_HOME.  All others are\n# 
optional.  When running a distributed configuration it is best to\n# set 
JAVA_HOME in this file, so that it is correctly defined on\n# remote 
nodes.\n\n# The java implementation to use.  Required.\nexport 
JAVA_HOME=/usr/java/default\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop 
home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop 
Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc 
required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The 
maximum amount of heap to use, in MB. Default is 1000.\nexport 
HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport 
HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java 
runtime options.  Empty by default.\nexport 
HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command 
specific options appen
 ded to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} 
-XX:PermSize={{namenode_opt_permsize}} 
-XX:MaxPermSize={{namenode_opt_maxpermsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS 
-Dhdfs.audit.logger=INFO,DRFAAUDIT,KAFKA_HDFS_AUDIT 
${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server 
-XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log 
-XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} 
-Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -
 XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT 
-Dhadoop.mapreduce.jobsummary.logger=INFO,JSA 
${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server 
-Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console 
-Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport 
HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC 
-XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m 
-XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m 
-Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc 
-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} 
-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT 
${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server 
-Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport 
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS
 \n\n# The following applies to multiple commands (fs, dfs, fsck, distcp 
etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m 
$HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as 
after dropping privileges\nexport 
HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# 
Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o 
ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  
$HADOOP_HOME/logs by default.\nexport 
HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport 
HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are 
stored in the secure data environment.\nexport 
HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export 
HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code 
should be rsync'd from.  Unset by def
 ault.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to 
sleep between slave commands.  Unset by default.  This\n# can be useful in 
large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than 
the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The 
directory where pid files are stored. /tmp by default.\nexport 
HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport 
HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# 
History server pid\nexport 
HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n#
 A string representing this instance of hadoop. $USER by default.\nexport 
HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard 
classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor 
jarFile
  in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n  
JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by 
oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n 
 JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by 
nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}:/usr/hdp/current/eagle/lib/common/*:/usr/hdp/current/kafka-broker/libs/*\n\n#
 added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; 
then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, 
the tez-client will be a symlink to the current folder of tez in HDP.\n    
export 
HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n
  fi\nfi\n\n# Setting path to hdfs command line\nexport 
HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 
2.0\nexport JAVA_LIB
 RARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport 
HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"\n"
         }
       }
     },
     {
       "hdfs-log4j": {
         "properties": {
-          "content": "\r\n#\r\n# Licensed to the Apache Software Foundation 
(ASF) under one\r\n# or more contributor license agreements.  See the NOTICE 
file\r\n# distributed with this work for additional information\r\n# regarding 
copyright ownership.  The ASF licenses this file\r\n# to you under the Apache 
License, Version 2.0 (the\r\n# \"License\"); you may not use this file except 
in compliance\r\n# with the License.  You may obtain a copy of the License 
at\r\n#\r\n#  http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless 
required by applicable law or agreed to in writing,\r\n# software distributed 
under the License is distributed on an\r\n# \"AS IS\" BASIS, WITHOUT WARRANTIES 
OR CONDITIONS OF ANY\r\n# KIND, either express or implied.  See the License for 
the\r\n# specific language governing permissions and limitations\r\n# under the 
License.\r\n#\r\n\r\n\r\n# Define some default values that can be overridden by 
system properties\r\n# To change daemon root logger use hadoop
 _root_logger in 
hadoop-env\r\nhadoop.root.logger=INFO,console\r\nhadoop.log.dir=.\r\nhadoop.log.file=hadoop.log\r\n\r\n\r\n#
 Define the root logger to the system property 
\"hadoop.root.logger\".\r\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\r\n\r\n# Logging Threshold\r\nlog4j.threshhold=ALL\r\n\r\n#\r\n# 
Daily Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Rollver at midnight\r\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\r\n\r\n# 
30-day 
backup\r\n#log4j.appender.DRFA.MaxBackupIndex=30\r\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\r\n\r\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\r\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\r\n# Debugging Pattern 
format\r\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\r\n\r\n\r\n#\r\n# console\r\n# Add \"console\" to rootlogg
 er above if you want to use 
this\r\n#\r\n\r\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\r\nlog4j.appender.console.target=System.err\r\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\r\n\r\n#\r\n# TaskLog Appender\r\n#\r\n\r\n#Default 
values\r\nhadoop.tasklog.taskid=null\r\nhadoop.tasklog.iscleanup=false\r\nhadoop.tasklog.noKeepSplits=4\r\nhadoop.tasklog.totalLogFileSize=100\r\nhadoop.tasklog.purgeLogSplits=true\r\nhadoop.tasklog.logsRetainHours=12\r\n\r\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\r\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\r\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\r\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\r\n\r\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\r\n\r\n#\r\n#Security audit appender\r\n
 
#\r\nhadoop.security.logger=INFO,console\r\nhadoop.security.log.maxfilesize=256MB\r\nhadoop.security.log.maxbackupindex=20\r\nlog4j.category.SecurityLogger=${hadoop.security.logger}\r\nhadoop.security.log.file=SecurityAuth.audit\r\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\r\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\r\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\r\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\r\n\r\n#\r\n#
 hdfs audit lo
 
gging\r\n#\r\nhdfs.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\r\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\r\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: 
%m%n\r\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=sandbox.eagle.incubator.apache.org:6667\r\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\r\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.KAFKA_HDFS_AUDIT.
 Layout.ConversionPattern=%d{ISO8601} %p %c{2}: 
%m%n\r\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\r\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\r\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\r\n\r\n#\r\n#
 mapred audit 
logging\r\n#\r\nmapred.audit.logger=INFO,console\r\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\r\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\r\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\r\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\r\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\r\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\r\n\r\n#\r\n# 
Rolling File 
Appender\r\n#\r\n\r\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\r\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\r\n\r\n#
 Logfile size and and 30-day backups\r\nlog4j.appender.RFA.MaxFileSize=256MB\r\n
 
log4j.appender.RFA.MaxBackupIndex=10\r\n\r\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\r\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\r\n\r\n\r\n# Custom Logging 
levels\r\n\r\nhadoop.metrics.log.level=INFO\r\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\r\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\r\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\r\n\r\n#
 Jets3t 
library\r\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\r\n\r\n#\r\n#
 Null Appender\r\n# Trap security logger on the hadoop client 
side\r\n#\r\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\r\n\r\n#\r\n#
 Event Counter Appender\r\n# Sends counts of logging messages at different 
severity levels to Hadoop Metrics.\r\n#\r\nlog4j.appender.EventCounter=
 org.apache.hadoop.log.metrics.EventCounter\r\n\r\n# Removes \"deprecated\" 
messages\r\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\r\n\r\n#\r\n#
 HDFS block state change log from block manager\r\n#\r\n# Uncomment the 
following to suppress normal block state change\r\n# messages from BlockManager 
in NameNode.\r\n#log4j.logger.BlockStateChange=WARN\r\n"
+          "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) 
under one\n# or more contributor license agreements.  See the NOTICE file\n# 
distributed with this work for additional information\n# regarding copyright 
ownership.  The ASF licenses this file\n# to you under the Apache License, 
Version 2.0 (the\n# \"License\"); you may not use this file except in 
compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing,\n# software distributed under the License is 
distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 
ANY\n# KIND, either express or implied.  See the License for the\n# specific 
language governing permissions and limitations\n# under the License.\n#\n\n\n# 
Define some default values that can be overridden by system properties\n# To 
change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logg
 er=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define 
the root logger to the system property 
\"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, 
EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling 
File 
Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
 Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day 
backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n#
 Pattern format: Date LogLevel LoggerName 
LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: 
%m%n\n# Debugging Pattern 
format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} 
(%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if 
you want to use 
this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.c
 
onsole.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd
 HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default 
values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601}
 %p %c: %m%n\n\n#\n#Security audit 
appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger
 
}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601}
 %p %c: 
%m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n#
 hdfs audit 
logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=fa
 
lse\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: 
%m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.KAFKA_HDFS_AUDIT=org.apache.eagle.log4j.kafka.KafkaLog4jAppender\nlog4j.appender.KAFKA_HDFS_AUDIT.Topic=sandbox_hdfs_audit_log\nlog4j.appender.KAFKA_HDFS_AUDIT.BrokerList=server.eagle.apache.org:6667\nlog4j.appender.KAFKA_HDFS_AUDIT.KeyClass=org.apache.eagle.log4j.kafka.hadoop.AuditLogKeyer\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout=org.apache.log4j.PatternLayout\nlog4j.appender.KAFKA_HDFS_AUDIT.Layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: 
%m%n\nlog4j.appender.KAFKA_HDFS_AUDIT.ProducerType=async\nlog4j.appender.KAFKA_HDFS_AUDIT.BatchSize=1\nlog4j.appender.KAFKA_HDFS_AUDIT.QueueSize=1\n\n#\n#
 mapred audit logging\n#\nmapred.audit.logger=INFO,con
 
sole\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601}
 %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling 
File 
Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
 Logfile size and and 30-day 
backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601}
 %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} 
%-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.l
 
og.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n#
 Jets3t 
library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n#
 Null Appender\n# Trap security logger on the hadoop client 
side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n#
 Event Counter Appender\n# Sends counts of logging messages at different 
severity levels to Hadoop 
Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n#
 Removes \"deprecated\" 
messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n#
 HDFS block state change log from block manager\n#\n# Uncomment the following 
to suppress normal block state change\n# messages from BlockManager in 
NameNode.\n#log4j.logger.BlockStateChange=WARN\n"
         }
       }
     }
@@ -29,6 +39,12 @@
       "name": "master",
       "components": [
         {
+          "name": "AMBARI_SERVER"
+        },
+        {
+          "name": "KAFKA_BROKER"
+        },
+        {
           "name": "APP_TIMELINE_SERVER"
         },
         {
@@ -50,9 +66,6 @@
           "name": "NAMENODE"
         },
         {
-          "name": "AMBARI_SERVER"
-        },
-        {
           "name": "HDFS_CLIENT"
         },
         {
@@ -101,9 +114,6 @@
           "name": "GANGLIA_MONITOR"
         },
         {
-          "name": "KAFKA_BROKER"
-        },
-        {
           "name": "DRPC_SERVER"
         },
         {
@@ -116,6 +126,7 @@
           "name": "SUPERVISOR"
         }
       ],
+      "configurations" : [ ],
       "cardinality": "1"
     }
   ],

http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/resource/install-cluster.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/install-cluster.sh b/eagle-external/eagle-docker/resource/install-cluster.sh
index 4acd384..910a112 100755
--- a/eagle-external/eagle-docker/resource/install-cluster.sh
+++ b/eagle-external/eagle-docker/resource/install-cluster.sh
@@ -16,9 +16,9 @@
 # limitations under the License.
 
 
-export PATH=/usr/jdk64/jdk1.7.0_67/bin:$PATH
-
+export AMBARI_HOST=$EAGLE_SERVER_HOST
 
+echo "installing hadoop cluster via ambari..."
 ./ambari-shell.sh << EOF
 blueprint add --file /tmp/eagle-singlenode.json
 blueprint add --file /tmp/eagle-multinode.json
@@ -29,8 +29,4 @@ EOF
 
 clear
 
-SERF_RPC_ADDR=${AMBARISERVER_PORT_7373_TCP##*/}
-serf event --rpc-addr=$SERF_RPC_ADDR eagle
-
 ./wait-for-eagle.sh
-

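The install script now registers both blueprints with Ambari through ambari-shell and leaves the readiness check (and the serf "eagle" event) to wait-for-eagle.sh. As a quick sanity check that the blueprints were accepted, Ambari's REST API can be queried directly; this is only a sketch and assumes Ambari's default admin/admin credentials and default port 8080, which are not part of this change:

  # list registered blueprints on the Ambari server (credentials/port are assumptions)
  curl -s -u admin:admin "http://$AMBARI_HOST:8080/api/v1/blueprints" | grep '"blueprint_name"'
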
http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/resource/serf/bin/start-serf-agent.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/serf/bin/start-serf-agent.sh b/eagle-external/eagle-docker/resource/serf/bin/start-serf-agent.sh
new file mode 100755
index 0000000..dbb8df0
--- /dev/null
+++ b/eagle-external/eagle-docker/resource/serf/bin/start-serf-agent.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+SERF_HOME=/usr/local/serf
+SERF_BIN=$SERF_HOME/bin/serf
+SERF_CONFIG_DIR=$SERF_HOME/etc
+SERF_LOG_FILE=/var/log/serf.log
+SERF_INIT_DIR=/usr/local/init
+
+set -x
+
+echo "Executing scripts from $SERF_INIT_DIR" | tee $SERF_LOG_FILE
+
+for file in $SERF_INIT_DIR/*
+do
+    echo "Execute: $file" | tee -a $SERF_LOG_FILE
+    /bin/bash $file | tee -a $SERF_LOG_FILE
+done
+
+# if the SERF_JOIN_IP env variable is set, generate a join config json for serf
+[[ -n $SERF_JOIN_IP ]] && cat > $SERF_CONFIG_DIR/join.json <<EOF
+{
+  "retry_join" : ["$SERF_JOIN_IP"],
+  "retry_interval" : "5s"
+}
+EOF
+
+# by default only the short hostname would be used as the node name,
+# but we need the FQDN
+# the while loop retries until the FQDN resolves (needed for azure deployments)
+unset SERF_HOSTNAME
+while [ -z "$SERF_HOSTNAME" ]; do
+  SERF_HOSTNAME=$(hostname -f 2>/dev/null)
+  sleep 5
+done;
+cat > $SERF_CONFIG_DIR/node.json <<EOF
+{
+  "node_name" : "$SERF_HOSTNAME",
+  "bind" : "$SERF_HOSTNAME"
+}
+EOF
+
+# if the SERF_ADVERTISE_IP env variable is set, generate an advertise.json for serf to advertise the given IP
+[[ -n $SERF_ADVERTISE_IP ]] && cat > $SERF_CONFIG_DIR/advertise.json <<EOF
+{
+  "advertise" : "$SERF_ADVERTISE_IP"
+}
+EOF
+
+$SERF_BIN agent -config-dir $SERF_CONFIG_DIR $@ | tee -a $SERF_LOG_FILE
\ No newline at end of file

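The new serf agent script runs any init scripts under $SERF_INIT_DIR, writes join.json, node.json and optionally advertise.json into $SERF_CONFIG_DIR, and then starts the agent. Once a container is up, the generated config and cluster membership can be inspected as sketched below; the paths follow the variables defined in the script, and the RPC address is just an example:

  # show the config fragments the script generated
  cat /usr/local/serf/etc/*.json
  # ask the local agent who has joined (serf RPC port 7373, as used elsewhere in this image)
  serf members -rpc-addr=127.0.0.1:7373
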
http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-external/eagle-docker/resource/wait-for-eagle.sh
----------------------------------------------------------------------
diff --git a/eagle-external/eagle-docker/resource/wait-for-eagle.sh b/eagle-external/eagle-docker/resource/wait-for-eagle.sh
index 3b6ab11..db614e6 100755
--- a/eagle-external/eagle-docker/resource/wait-for-eagle.sh
+++ b/eagle-external/eagle-docker/resource/wait-for-eagle.sh
@@ -15,24 +15,28 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-: ${EAGLE_HOST:=$AMBARISERVER_PORT_9099_TCP_ADDR}
+#: ${EAGLE_HOST:=$AMBARISERVER_PORT_9099_TCP_ADDR}
+: ${EAGLE_HOST:=$EAGLE_SERVER_HOST}
 : ${SLEEP:=2}
 : ${DEBUG:=1}
 
-: ${EAGLE_HOST:? eagle server address is mandatory, fallback is a linked containers exposed 9099}
+: ${EAGLE_HOST:? eagle server address is mandatory, fallback is a linked containers exposed 9090}
 
 debug() {
   [ $DEBUG -gt 0 ] && echo [DEBUG] "$@" 1>&2
 }
 
 get-server-state() {
-  curl -s -o /dev/null -w "%{http_code}" $AMBARISERVER_PORT_9099_TCP_ADDR:9099/eagle-service/index.html
+  curl -s -o /dev/null -w "%{http_code}" $EAGLE_HOST:9090
 }
 
-debug waits for eagle to start on: $EAGLE_HOST
+SERF_RPC_ADDR=${EAGLE_SERVER_HOST}:7373
+serf event --rpc-addr=$SERF_RPC_ADDR eagle
+
+debug waiting for eagle to start on: $EAGLE_HOST
 while ! get-server-state | grep 200 &>/dev/null ; do
   [ $DEBUG -gt 0 ] && echo -n .
   sleep $SLEEP
 done
 [ $DEBUG -gt 0 ] && echo
-debug eagle web started: $EAGLE_HOST:9099/eagle-service
+debug eagle web started: $EAGLE_HOST:9090

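wait-for-eagle.sh now fires the serf "eagle" event itself and then polls the Eagle web endpoint on port 9090 until it returns HTTP 200. The same probe can be run by hand as a minimal sketch; EAGLE_HOST below is a placeholder for wherever the Eagle server is reachable:

  # standalone readiness probe (EAGLE_HOST is a placeholder)
  EAGLE_HOST=eagle-server
  until [ "$(curl -s -o /dev/null -w '%{http_code}' "$EAGLE_HOST:9090")" = "200" ]; do
    printf '.'
    sleep 2
  done
  echo " eagle web started: $EAGLE_HOST:9090"
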
http://git-wip-us.apache.org/repos/asf/eagle/blob/39ab83e6/eagle-server/src/main/resources/application.conf
----------------------------------------------------------------------
diff --git a/eagle-server/src/main/resources/application.conf b/eagle-server/src/main/resources/application.conf
index d657f54..a1f13bb 100644
--- a/eagle-server/src/main/resources/application.conf
+++ b/eagle-server/src/main/resources/application.conf
@@ -65,6 +65,9 @@ storage {
     # eagle coprocessor enabled or not: [true, false]
     # default is false
     coprocessorEnabled = false
+
+    # Auto-create HBase tables
+    autoCreateTable = true
   }
 }
 

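With autoCreateTable enabled, the HBase storage layer is expected to create Eagle's tables on demand instead of requiring them to be pre-created by the init scripts. Inside a running container the result can be eyeballed from the HBase shell, assuming the hbase client is on the PATH:

  # list tables non-interactively and look for the eagle ones
  echo "list" | hbase shell
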