http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/java/org/apache/eagle/partition/PartitionStrategyImpl.java
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/java/org/apache/eagle/partition/PartitionStrategyImpl.java b/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/java/org/apache/eagle/partition/PartitionStrategyImpl.java
deleted file mode 100644
index eacefd5..0000000
--- a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/java/org/apache/eagle/partition/PartitionStrategyImpl.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- *    Licensed to the Apache Software Foundation (ASF) under one or more
- *    contributor license agreements.  See the NOTICE file distributed with
- *    this work for additional information regarding copyright ownership.
- *    The ASF licenses this file to You under the Apache License, Version 2.0
- *    (the "License"); you may not use this file except in compliance with
- *    the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS,
- *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *    See the License for the specific language governing permissions and
- *    limitations under the License.
- *
- */
-
-package org.apache.eagle.partition;
-
-import org.apache.commons.lang3.time.DateUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.Map;
-
-public class PartitionStrategyImpl implements PartitionStrategy {
-
-    public DataDistributionDao dao;
-    public PartitionAlgorithm algorithm;
-    public Map<String, Integer> routingTable;
-    public long lastRefreshTime;
-    public long refreshInterval;
-    public long timeRange;
-    public static long DEFAULT_TIME_RANGE = 2 * DateUtils.MILLIS_PER_DAY;
-    public static long DEFAULT_REFRESH_INTERVAL = 2 * DateUtils.MILLIS_PER_HOUR;
-    private final Logger LOG = LoggerFactory.getLogger(PartitionStrategyImpl.class);
-
-    public PartitionStrategyImpl(DataDistributionDao dao, PartitionAlgorithm algorithm, long refreshInterval, long timeRange) {
-        this.dao = dao;
-        this.algorithm = algorithm;
-        this.refreshInterval = refreshInterval;
-        this.timeRange = timeRange;
-    }
-
-    public PartitionStrategyImpl(DataDistributionDao dao, PartitionAlgorithm algorithm) {
-        this(dao, algorithm, DEFAULT_REFRESH_INTERVAL, DEFAULT_TIME_RANGE);
-    }
-
-    public boolean needRefresh() {
-        if (System.currentTimeMillis() > lastRefreshTime + refreshInterval) {
-            lastRefreshTime = System.currentTimeMillis();
-            return true;
-        }
-        return false;
-    }
-
-    public Map<String, Integer> generateRoutingTable(int buckNum) {
-        try {
-            long currentTime = System.currentTimeMillis();
-            List<Weight> weights = dao.fetchDataDistribution(currentTime - timeRange, currentTime);
-            routingTable = algorithm.partition(weights, buckNum);
-            return routingTable;
-        }
-        catch (Exception ex) {
-            throw new RuntimeException(ex);
-        }
-    }
-
-    @Override
-    public int balance(String key, int buckNum) {
-        if (needRefresh()) {
-            LOG.info("Going to refresh routing table");
-            routingTable = generateRoutingTable(buckNum);
-            LOG.info("Finish refresh routing table");
-        }
-        if (routingTable.containsKey(key)) {
-            return routingTable.get(key);
-        }
-        else {
-            return Math.abs(key.hashCode()) % buckNum;
-        }
-    }
-}
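
The deleted strategy lazily rebuilds a key-to-bucket routing table from the recent data
distribution and falls back to plain hash modulo for keys the table does not cover. A
minimal sketch of how it could be wired up, assuming DataDistributionDao declares only the
fetchDataDistribution(long, long) method and PartitionAlgorithm only the
partition(List<Weight>, int) method used above (the round-robin algorithm and the sample
weights are hypothetical, not part of this commit):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.eagle.partition.*;

public class PartitionStrategyExample {
    public static void main(String[] args) {
        // Hypothetical dao; a real one would query recent traffic statistics.
        DataDistributionDao dao = new DataDistributionDao() {
            @Override
            public List<Weight> fetchDataDistribution(long startTime, long endTime) {
                return Arrays.asList(new Weight("userA", 10.0), new Weight("userB", 1.0));
            }
        };
        // Hypothetical algorithm: spread the weighted keys round-robin across buckets.
        PartitionAlgorithm roundRobin = new PartitionAlgorithm() {
            @Override
            public Map<String, Integer> partition(List<Weight> weights, int bucketNum) {
                Map<String, Integer> table = new HashMap<String, Integer>();
                int i = 0;
                for (Weight w : weights) {
                    table.put(w.key, i++ % bucketNum);
                }
                return table;
            }
        };
        PartitionStrategy strategy = new PartitionStrategyImpl(dao, roundRobin);
        int bucket = strategy.balance("userA", 8); // refreshes the routing table when stale
    }
}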

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/java/org/apache/eagle/partition/Weight.java
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/java/org/apache/eagle/partition/Weight.java b/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/java/org/apache/eagle/partition/Weight.java
deleted file mode 100644
index 14d005d..0000000
--- a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/java/org/apache/eagle/partition/Weight.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- *
- *    Licensed to the Apache Software Foundation (ASF) under one or more
- *    contributor license agreements.  See the NOTICE file distributed with
- *    this work for additional information regarding copyright ownership.
- *    The ASF licenses this file to You under the Apache License, Version 2.0
- *    (the "License"); you may not use this file except in compliance with
- *    the License.  You may obtain a copy of the License at
- *
- *       http://www.apache.org/licenses/LICENSE-2.0
- *
- *    Unless required by applicable law or agreed to in writing, software
- *    distributed under the License is distributed on an "AS IS" BASIS,
- *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *    See the License for the specific language governing permissions and
- *    limitations under the License.
- *
- */
-
-package org.apache.eagle.partition;
-
-public class Weight {
-    public String key;
-    public Double value;
-
-    public Weight(String key, Double value) {
-        this.key = key;
-        this.value = value;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/Collector.scala
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/Collector.scala b/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/Collector.scala
deleted file mode 100644
index 5361001..0000000
--- a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/Collector.scala
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.eagle.datastream
-
-trait Collector[-R] {
-  def collect(r : R);
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/FlatMapper.scala
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/FlatMapper.scala b/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/FlatMapper.scala
deleted file mode 100644
index ddea46f..0000000
--- a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/FlatMapper.scala
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.eagle.datastream
-
-trait FlatMapper[T] extends Serializable {
-  def flatMap(input : Seq[AnyRef], collector : Collector[T])
-}
-
-case class FlatMapperWrapper[T](func:(Any,Collector[T]) => Unit) extends FlatMapper[T]{
-  override def flatMap(input: Seq[AnyRef], collector: Collector[T]): Unit = func(input,collector)
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/StormStreamExecutor.scala
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/StormStreamExecutor.scala b/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/StormStreamExecutor.scala
deleted file mode 100644
index 836c7eb..0000000
--- a/eagle-core/eagle-data-process/eagle-stream-process-base/src/main/scala/org/apache/eagle/datastream/StormStreamExecutor.scala
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.eagle.datastream
-
-import com.typesafe.config.Config
-import scala.collection.JavaConverters._
-
-trait StormStreamExecutor[R <: Any] extends FlatMapper[R] {
-  def prepareConfig(config : Config)
-  def init
-  def fields : Array[String]
-}
-
-trait JavaStormStreamExecutor[R <: AnyRef] extends FlatMapper[R] {
-  def prepareConfig(config : Config)
-  def init
-  def fields : Array[String]
-  override def toString() = this.getClass.getSimpleName
-
-  override def flatMap(input : Seq[AnyRef], collector : Collector[R]) = flatMap(input.asJava,collector)
-
-  def flatMap(input : java.util.List[AnyRef], collector : Collector[R])
-}
-
-abstract class StormStreamExecutor1[T0] extends StormStreamExecutor[Tuple1[T0]] {
-  override def fields = Array("f0")
-}
-
-abstract class JavaStormStreamExecutor1[T0] extends JavaStormStreamExecutor[Tuple1[T0]] {
-  override def fields = Array("f0")
-}
-
-abstract class  StormStreamExecutor2[T0, T1] extends StormStreamExecutor[Tuple2[T0, T1]] {
-  override def fields = Array("f0", "f1")
-}
-
-abstract class  JavaStormStreamExecutor2[T0, T1] extends JavaStormStreamExecutor[Tuple2[T0, T1]] {
-  override def fields = Array("f0", "f1")
-}
-
-abstract class  StormStreamExecutor3[T0, T1, T2] extends StormStreamExecutor[Tuple3[T0, T1, T2]] {
-  override def fields = Array("f0", "f1", "f2")
-}
-
-abstract class  JavaStormStreamExecutor3[T0, T1, T2] extends JavaStormStreamExecutor[Tuple3[T0, T1, T2]] {
-  override def fields = Array("f0", "f1", "f2")
-}
-
-abstract class  StormStreamExecutor4[T0, T1, T2, T3] extends StormStreamExecutor[Tuple4[T0, T1, T2, T3]] {
-  override def fields = Array("f0", "f1", "f2", "f3")
-}
-
-abstract class  JavaStormStreamExecutor4[T0, T1, T2, T3] extends JavaStormStreamExecutor[Tuple4[T0, T1, T2, T3]] {
-  override def fields = Array("f0", "f1", "f2", "f3")
-}
\ No newline at end of file
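
These traits formed the Java-facing executor API: prepareConfig receives the topology
Config, init is the post-construction lifecycle hook, fields names the emitted tuple
fields, and flatMap consumes a raw input tuple and emits scala.TupleN values through the
Collector. A rough Java sketch against the removed JavaStormStreamExecutor1 (the class
name, logic, and values below are invented for illustration, not code from this commit):

import com.typesafe.config.Config;
import org.apache.eagle.datastream.Collector;
import org.apache.eagle.datastream.JavaStormStreamExecutor1;
import scala.Tuple1;

// Hypothetical executor: upper-cases the first field of every incoming tuple
// and re-emits it as the single declared output field "f0".
public class UpperCaseExecutor extends JavaStormStreamExecutor1<String> {
    private Config config;

    @Override
    public void prepareConfig(Config config) {
        this.config = config; // keep topology-level configuration for later use
    }

    @Override
    public void init() {
        // no state to initialize in this sketch
    }

    @Override
    public void flatMap(java.util.List<Object> input, Collector<Tuple1<String>> collector) {
        collector.collect(new Tuple1<String>(input.get(0).toString().toUpperCase()));
    }
}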

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-data-process/pom.xml
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-data-process/pom.xml b/eagle-core/eagle-data-process/pom.xml
deleted file mode 100644
index 90b125b..0000000
--- a/eagle-core/eagle-data-process/pom.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one or more
-  ~ contributor license agreements.  See the NOTICE file distributed with
-  ~ this work for additional information regarding copyright ownership.
-  ~ The ASF licenses this file to You under the Apache License, Version 2.0
-  ~ (the "License"); you may not use this file except in compliance with
-  ~ the License.  You may obtain a copy of the License at
-  ~
-  ~    http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>eagle</groupId>
-        <artifactId>eagle-core</artifactId>
-        <version>0.3.0</version>
-    </parent>
-    <artifactId>eagle-data-process-parent</artifactId>
-    <packaging>pom</packaging>
-    <description>Eagle Data Process Framework</description>
-    <modules>
-        <module>eagle-stream-process-base</module>
-       <module>eagle-storm-jobrunning-spout</module>
-       <module>eagle-job-common</module>
-        <module>eagle-stream-process-api</module>
-        <module>eagle-stream-pipeline</module>
-    </modules>
-</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/pom.xml
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/pom.xml b/eagle-core/eagle-embed/eagle-embed-hbase/pom.xml
deleted file mode 100644
index ab9a253..0000000
--- a/eagle-core/eagle-embed/eagle-embed-hbase/pom.xml
+++ /dev/null
@@ -1,73 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one or more
-  ~ contributor license agreements.  See the NOTICE file distributed with
-  ~ this work for additional information regarding copyright ownership.
-  ~ The ASF licenses this file to You under the Apache License, Version 2.0
-  ~ (the "License"); you may not use this file except in compliance with
-  ~ the License.  You may obtain a copy of the License at
-  ~
-  ~    http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <parent>
-        <artifactId>eagle-embed-parent</artifactId>
-        <groupId>eagle</groupId>
-        <version>0.3.0</version>
-        <relativePath>../pom.xml</relativePath>
-    </parent>
-    <modelVersion>4.0.0</modelVersion>
-
-    <artifactId>eagle-embed-hbase</artifactId>
-    <packaging>jar</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.apache.hbase</groupId>
-            <artifactId>hbase-server</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.apache.hbase</groupId>
-            <artifactId>hbase-testing-util</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>log4j</groupId>
-            <artifactId>log4j</artifactId>
-        </dependency>
-        <!--<dependency>-->
-            <!--<groupId>org.slf4j</groupId>-->
-            <!--<artifactId>slf4j-log4j12</artifactId>-->
-        <!--</dependency>-->
-        <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>log4j-over-slf4j</artifactId>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <artifactId>maven-jar-plugin</artifactId>
-                <executions>
-                    <execution>
-                        <id>test-jar</id>
-                        <phase>test-compile</phase>
-                        <goals>
-                            <goal>test-jar</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-        </plugins>
-    </build>
-</project>
-

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh
deleted file mode 100755
index 62b1e83..0000000
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/eagle-topology-init.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-export EAGLE_SERVICE_USER=admin
-export EAGLE_SERVICE_PASSWD=secret
-export EAGLE_SERVICE_HOST=localhost
-export EAGLE_SERVICE_PORT=38080
-
-#####################################################################
-#            Import stream metadata for HDFS
-#####################################################################
-
-## AlertDataSource: data sources bound to sites
-echo "Importing AlertDataSourceService for HDFS... "
-
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDataSourceService" -d '[{"prefix":"alertDataSource","tags":{"site" : "sandbox", "dataSource":"hdfsAuditLog"}, "enabled": "true", "config" : "{\"hdfsEndpoint\":\"hdfs://sandbox.hortonworks.com:8020\"}", "desc":"HDFS"}]'
-
-
-## AlertStreamService: alert streams generated from data source
-echo ""
-echo "Importing AlertStreamService for HDFS... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertStreamService" -d '[{"prefix":"alertStream","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream"},"desc":"alert event stream from hdfs audit log"}]'
-
-## AlertExecutorService: what alert streams are consumed by alert executor
-echo ""
-echo "Importing AlertExecutorService for HDFS... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertExecutorService" -d '[{"prefix":"alertExecutor","tags":{"dataSource":"hdfsAuditLog","alertExecutorId":"hdfsAuditLogAlertExecutor","streamName":"hdfsAuditLogEventStream"},"desc":"alert executor for hdfs audit log event stream"}]'
-
-## AlertStreamSchemaService: schema for event from alert stream
-echo ""
-echo "Importing AlertStreamSchemaService for HDFS... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertStreamSchemaService" -d '[{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"src"},"attrDescription":"source directory or file, such as /tmp","attrType":"string","category":"","attrValueResolver":"eagle.service.security.hdfs.resolver.HDFSResourceResolver"},{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"dst"},"attrDescription":"destination directory, such as /tmp","attrType":"string","category":"","attrValueResolver":"eagle.service.security.hdfs.resolver.HDFSResourceResolver"},{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"host"},"attrDescription":"hostname, such as localhost","attrType":"string","category":"","attrValueResolver":""},{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"timestamp"},"attrDescription":"milliseconds of the datetime","attrType":"long","category":"","attrValueResolver":""},{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"allowed"},"attrDescription":"true, false or none","attrType":"bool","category":"","attrValueResolver":""},{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"user"},"attrDescription":"process user","attrType":"string","category":"","attrValueResolver":""},{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"cmd"},"attrDescription":"file/directory operation, such as getfileinfo, open, listStatus and so on","attrType":"string","category":"","attrValueResolver":"eagle.service.security.hdfs.resolver.HDFSCommandResolver"},{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"sensitivityType"},"attrDescription":"mark such as AUDITLOG, SECURITYLOG","attrType":"string","category":"","attrValueResolver":"eagle.service.security.hdfs.resolver.HDFSSensitivityTypeResolver"},{"prefix":"alertStreamSchema","tags":{"dataSource":"hdfsAuditLog","streamName":"hdfsAuditLogEventStream","attrName":"securityZone"},"attrDescription":"","attrType":"string","category":"","attrValueResolver":""}]'
-
-#####################################################################
-#            Import stream metadata for HIVE
-#####################################################################
-
-## AlertDataSource: data sources bound to sites
-echo ""
-echo "Importing AlertDataSourceService for HIVE... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDataSourceService" -d '[{"prefix":"alertDataSource","tags":{"site" : "sandbox", "dataSource":"hiveQueryLog"},"enabled": "true", "config" : "{\"accessType\":\"metastoredb_jdbc\",\"password\":\"hive\",\"user\":\"hive\",\"jdbcDriverClassName\":\"com.mysql.jdbc.Driver\",\"jdbcUrl\":\"jdbc:mysql://sandbox.hortonworks.com/hive?createDatabaseIfNotExist=true\"}", "desc":"HIVE"}]'
-
-## AlertStreamService: alert streams generated from data source
-echo ""
-echo "Importing AlertStreamService for HIVE... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertStreamService" -d '[{"prefix":"alertStream","tags":{"dataSource":"hiveQueryLog","streamName":"hiveAccessLogStream"},"desc":"alert event stream from hive query"}]'
-
-## AlertExecutorService: what alert streams are consumed by alert executor
-echo ""
-echo "Importing AlertExecutorService for HIVE... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertExecutorService" -d '[{"prefix":"alertExecutor","tags":{"dataSource":"hiveQueryLog","alertExecutorId":"hiveAccessAlertByRunningJob","streamName":"hiveAccessLogStream"},"desc":"alert executor for hive query log event stream"}]'
-
-## AlertStreamSchemaServiceService: schema for event from alert stream
-echo ""
-echo "Importing AlertStreamSchemaService for HIVE... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertStreamSchemaService" -d '[{"prefix":"alertStreamSchema","category":"","attrType":"string","attrDescription":"process user","attrValueResolver":"","tags":{"dataSource":"hiveQueryLog","streamName":"hiveAccessLogStream","attrName":"user"}},{"prefix":"alertStreamSchema","category":"","attrType":"string","attrDescription":"hive sql command, such as SELECT, INSERT and DELETE","attrValueResolver":"eagle.service.security.hive.resolver.HiveCommandResolver","tags":{"dataSource":"hiveQueryLog","streamName":"hiveAccessLogStream","attrName":"command"}},{"prefix":"alertStreamSchema","category":"","attrType":"long","attrDescription":"milliseconds of the datetime","attrValueResolver":"","tags":{"dataSource":"hiveQueryLog","streamName":"hiveAccessLogStream","attrName":"timestamp"}},{"prefix":"alertStreamSchema","category":"","attrType":"string","attrDescription":"/database/table/column or /database/table/*","attrValueResolver":"eagle.service.security.hive.resolver.HiveMetadataResolver","tags":{"dataSource":"hiveQueryLog","streamName":"hiveAccessLogStream","attrName":"resource"}},{"prefix":"alertStreamSchema","category":"","attrType":"string","attrDescription":"mark such as PHONE_NUMBER","attrValueResolver":"eagle.service.security.hive.resolver.HiveSensitivityTypeResolver","tags":{"dataSource":"hiveQueryLog","streamName":"hiveAccessLogStream","attrName":"sensitivityType"}}]'
-
-#####################################################################
-#            Import stream metadata for UserProfile
-#####################################################################
-echo ""
-echo "Importing AlertDataSourceService for USERPROFILE"
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDataSourceService" -d '[{"prefix":"alertDataSource","tags":{"site" : "sandbox", "dataSource":"userProfile"}, "enabled": "true", "config" : "{\"features\":\"getfileinfo,open,listStatus,setTimes,setPermission,rename,mkdirs,create,setReplication,contentSummary,delete,setOwner,fsck\"}", "desc":"USERPROFILE"}]'
-
-echo ""
-echo "Importing AlertDefinitionService for USERPROFILE"
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H "Content-Type: application/json"  "http://$EAGLE_SERVICE_HOST:$EAGLE_SERVICE_PORT/eagle-service/rest/entities?serviceName=AlertDefinitionService" \
-     -d '[ { "prefix": "alertdef", "tags": { "site": "sandbox", "dataSource": "userProfile", "alertExecutorId": "userProfileAnomalyDetectionExecutor", "policyId": "userProfile", "policyType": "MachineLearning" }, "desc": "user profile anomaly detection", "policyDef": "{\"type\":\"MachineLearning\",\"alertContext\":{\"site\":\"sandbox\",\"dataSource\":\"userProfile\",\"component\":\"testComponent\",\"description\":\"ML based user profile anomaly detection\",\"severity\":\"WARNING\",\"notificationByEmail\":\"true\"},\"algorithms\":[{\"name\":\"EigenDecomposition\",\"evaluator\":\"eagle.security.userprofile.impl.UserProfileAnomalyEigenEvaluator\",\"description\":\"EigenBasedAnomalyDetection\",\"features\":\"getfileinfo, open, listStatus, setTimes, setPermission, rename, mkdirs, create, setReplication, contentSummary, delete, setOwner, fsck\"},{\"name\":\"KDE\",\"evaluator\":\"eagle.security.userprofile.impl.UserProfileAnomalyKDEEvaluator\",\"description\":\"DensityBasedAnomalyDetection \",\"features\":\"getfileinfo, open, listStatus, setTimes, setPermission, rename, mkdirs, create, setReplication, contentSummary, delete, setOwner, fsck\"}]}", "dedupeDef": "{\"alertDedupIntervalMin\":\"0\",\"emailDedupIntervalMin\":\"0\"}", "notificationDef": "", "remediationDef": "", "enabled": true } ]'
-
-echo ""
-echo "Importing AlertExecutorService for USERPROFILE"
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H "Content-Type: application/json"  "http://$EAGLE_SERVICE_HOST:$EAGLE_SERVICE_PORT/eagle-service/rest/entities?serviceName=AlertExecutorService" \
-      -d '[ { "prefix": "alertExecutor", "tags":{ "site":"sandbox", "dataSource":"userProfile", "alertExecutorId" : "userProfileAnomalyDetectionExecutor", "streamName":"userActivity" }, "desc": "user activity data source" } ]'
-
-echo ""
-echo "Importing AlertStreamService for USERPROFILE"
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H "Content-Type: application/json"  "http://$EAGLE_SERVICE_HOST:$EAGLE_SERVICE_PORT/eagle-service/rest/entities?serviceName=AlertStreamService" \
-     -d '[ { "prefix": "alertStream", "tags": { "streamName": "userActivity", "site":"sandbox", "dataSource":"userProfile" }, "alertExecutorIdList": [ "userProfileAnomalyDetectionExecutor" ] } ]'
-
-## Finished
-echo ""
-echo "Finished initialization for eagle topology"
-
-exit 0

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/prepare-dev-data.sh
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/prepare-dev-data.sh b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/prepare-dev-data.sh
deleted file mode 100755
index b7f6b28..0000000
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/prepare-dev-data.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-./eagle-topology-init.sh
-./sample-policy-create.sh
-./sample-sensitivity-resource-create.sh
-

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh
deleted file mode 100755
index 307c542..0000000
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-policy-create.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-export EAGLE_SERVICE_USER=admin
-export EAGLE_SERVICE_PASSWD=secret
-export EAGLE_SERVICE_HOST=localhost
-export EAGLE_SERVICE_PORT=38080
-
-#### create hdfs policy sample in sandbox
-echo "create hdfs policy sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDefinitionService" -d \
-'[{"tags":{"site":"sandbox","dataSource":"hdfsAuditLog","alertExecutorId":"hdfsAuditLogAlertExecutor","policyId":"viewPrivate","policyType":"siddhiCEPEngine"},"desc":"view private file","policyDef":"{\"type\":\"siddhiCEPEngine\",\"expression\":\"from hdfsAuditLogEventStream[(cmd=='\'open\'') and (src=='\'/tmp/private\'')] select * insert into outputStream\"}","dedupeDef": "{\"alertDedupIntervalMin\":0,\"emailDedupIntervalMin\":1440}","notificationDef": "[{\"subject\":\"just for test\",\"sender\":\"nob...@test.com\",\"recipients\":\"nob...@test.com\",\"flavor\":\"email\",\"id\":\"email_1\",\"tplFileName\":\"\"}]","remediationDef":"","enabled":true}]'
-
-#### create hive policy sample in sandbox
-echo "create hive policy sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=AlertDefinitionService" -d \
-'[{"tags":{"site":"sandbox","dataSource":"hiveQueryLog","alertExecutorId":"hiveAccessAlertByRunningJob","policyId":"queryPhoneNumber","policyType":"siddhiCEPEngine"},"desc":"query sensitive hive data","policyDef":"{\"type\":\"siddhiCEPEngine\",\"expression\":\"from hiveAccessLogStream[(sensitivityType=='\'PHONE_NUMBER\'')] select * insert into outputStream;\"}","dedupeDef": "{\"alertDedupIntervalMin\":0,\"emailDedupIntervalMin\":1440}","notificationDef": "[{\"subject\":\"just for test\",\"sender\":\"nob...@test.com\",\"recipients\":\"nob...@test.com\",\"flavor\":\"email\",\"id\":\"email_1\",\"tplFileName\":\"\"}]","remediationDef":"","enabled":"true"}]'

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-sensitivity-resource-create.sh
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-sensitivity-resource-create.sh b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-sensitivity-resource-create.sh
deleted file mode 100755
index daefba6..0000000
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/initdata/sample-sensitivity-resource-create.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-export EAGLE_SERVICE_USER=admin
-export EAGLE_SERVICE_PASSWD=secret
-export EAGLE_SERVICE_HOST=localhost
-export EAGLE_SERVICE_PORT=38080
-
-#### create hive sensitivity sample in sandbox
-echo "create hive sensitivity sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=HiveResourceSensitivityService" -d '[{"tags":{"site" : "sandbox", "hiveResource":"/xademo/customer_details/phone_number"}, "sensitivityType": "PHONE_NUMBER"}]'
-
-
-#### create hdfs sensitivity sample in sandbox
-echo "create hdfs sensitivity sample in sandbox... "
-curl -u ${EAGLE_SERVICE_USER}:${EAGLE_SERVICE_PASSWD} -X POST -H 'Content-Type:application/json' "http://${EAGLE_SERVICE_HOST}:${EAGLE_SERVICE_PORT}/eagle-service/rest/entities?serviceName=FileSensitivityService" -d '[{"tags":{"site" : "sandbox", "filedir":"/tmp/private"}, "sensitivityType": "PRIVATE"}]'
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java
deleted file mode 100644
index 7bfb409..0000000
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/EmbeddedHbase.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.eagle.service.hbase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- */
-public class EmbeddedHbase {
-    private HBaseTestingUtility util;
-    private MiniHBaseCluster hBaseCluster;
-    private static EmbeddedHbase hbase;
-    private int port;    
-    private String znode;
-    private static int DEFAULT_PORT = 2181;
-    private static String DEFAULT_ZNODE = "/hbase-unsecure";
-       private static final Logger LOG = LoggerFactory.getLogger(EmbeddedHbase.class);
-       
-    private EmbeddedHbase(int port, String znode) {
-       this.port = port;
-       this.znode = znode;     
-    }
-    
-    private EmbeddedHbase(int port) {
-       this(port, DEFAULT_ZNODE);
-    }
-    
-    public static EmbeddedHbase getInstance() {
-       if (hbase == null) {
-               synchronized(EmbeddedHbase.class) {
-                       if (hbase == null) {
-                               hbase = new EmbeddedHbase();
-                               hbase.start();
-                       }
-               }
-       }
-       return hbase;
-    }
-    
-    private EmbeddedHbase() {
-       this(DEFAULT_PORT, DEFAULT_ZNODE);
-    }
-
-    public void start() {
-       try {
-               util = new HBaseTestingUtility();
-               Configuration conf= util.getConfiguration();
-               conf.setInt("test.hbase.zookeeper.property.clientPort", port);
-               conf.set("zookeeper.znode.parent", znode);
-               conf.setInt("hbase.zookeeper.property.maxClientCnxns", 200);
-               conf.setInt("hbase.master.info.port", -1);//avoid port clobbering
-               // start mini hbase cluster
-               hBaseCluster = util.startMiniCluster();
-               Configuration config = hBaseCluster.getConf();
-               
-               config.set("zookeeper.session.timeout", "120000");
-               config.set("hbase.zookeeper.property.tickTime", "6000");
-               config.set(HConstants.HBASE_CLIENT_PAUSE, "3000");
-               config.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "1");
-               config.set(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, "60000");
-               
-               Runtime.getRuntime().addShutdownHook(new Thread() {
-                   @Override
-                   public void run() {
-                       shutdown();
-                   }
-               }); 
-       }
-       catch (Throwable t) {
-               LOG.error("Got an exception: ",t);
-       }
-    }
-
-    public void shutdown() {           
-       try {
-            util.shutdownMiniCluster();
-        }
-       catch (Throwable t) {
-               LOG.info("Got an exception, " + t , t.getCause());
-               try {
-                util.shutdownMiniCluster();
-               }
-               catch (Throwable t1) {
-               }
-       }
-    }
-    
-    public void createTable(String tableName, String cf) {
-       try {                   
-               util.createTable(tableName, cf);
-       }
-       catch (Exception ex) {
-               LOG.warn("Create table failed, probably table already existed, table name: " + tableName);
-       }
-    }
-    
-    public void deleteTable(String tableName){
-       try {
-               util.deleteTable(tableName);
-       }
-       catch (Exception ex) {
-               LOG.warn("Delete table failed, probably table not existed, table name: " + tableName);
-       }
-    }
-
-    public static void main(String[] args){
-        EmbeddedHbase hbase = new EmbeddedHbase(12181);
-        hbase.start();
-        for(String table : new Tables().getTables()){
-            hbase.createTable(table, "f");
-        }
-    }
-}
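
The removed class wrapped HBaseTestingUtility as a lazily started singleton mini cluster
(ZooKeeper client port 2181, znode /hbase-unsecure) with a JVM shutdown hook for cleanup.
A sketch of the kind of JUnit usage this enabled (the test class and table interaction
below are illustrative assumptions, not code from this commit):

import org.junit.Test;
import org.apache.eagle.service.hbase.EmbeddedHbase;

public class EmbeddedHbaseExampleTest {

    @Test
    public void runAgainstMiniCluster() {
        // First call starts the mini cluster; later calls reuse the singleton.
        EmbeddedHbase hbase = EmbeddedHbase.getInstance();

        // createTable only warns if the table already exists, so tests can rerun safely.
        hbase.createTable("eagle_metric", "f");

        // ... exercise HBase-backed service code against the embedded cluster ...

        hbase.deleteTable("eagle_metric");
        // no explicit shutdown needed: the registered shutdown hook stops the cluster
    }
}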

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java
deleted file mode 100644
index f8941bc..0000000
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/java/org/apache/eagle/service/hbase/Tables.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.eagle.service.hbase;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class Tables {
-    List<String> tables = new ArrayList<String>();
-    public Tables(){
-        tables.add("eagle_metric");
-
-        tables.add("actiondetail");
-        tables.add("alertdetail");
-        tables.add("alertgroup");
-        tables.add("alertmeta");
-        tables.add("alertMetaEntity");
-
-        // for alert framework
-        tables.add("alertDataSource");
-        tables.add("alertStream");
-        tables.add("alertExecutor");
-        tables.add("alertStreamSchema");
-        tables.add("alertdef");
-
-        // for security
-        tables.add("hiveResourceSensitivity");
-        tables.add("fileSensitivity");
-        tables.add("ipzone");
-        tables.add("mlmodel");
-        tables.add("userprofile");
-    }
-
-    public List<String> getTables(){
-        return this.tables;
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml b/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml
deleted file mode 100644
index 62cdff1..0000000
--- a/eagle-core/eagle-embed/eagle-embed-hbase/src/main/resources/hbase-default.xml
+++ /dev/null
@@ -1,935 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>file:///tmp/hbase-${user.name}/hbase</value>
-    <description>The directory shared by region servers and into
-      which HBase persists.  The URL should be 'fully-qualified'
-      to include the filesystem scheme.  For example, to specify the
-      HDFS directory '/hbase' where the HDFS instance's namenode is
-      running at namenode.example.org on port 9000, set this value to:
-      hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-      into /tmp.  Change this configuration else all data will be lost
-      on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.port</name>
-    <value>60000</value>
-    <description>The port the HBase Master should bind to.</description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>false</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/tmp/hbase-${user.name}</value>
-    <description>Temporary directory on the local filesystem.
-      Change this setting to point to a location more permanent
-      than '/tmp' (The '/tmp' directory is often cleared on
-      machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>60010</value>
-    <description>The port for the HBase Master web UI.
-      Set to -1 if you do not want a UI instance run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.write.buffer</name>
-    <value>2097152</value>
-    <description>Default size of the HTable client write buffer in bytes.
-      A bigger buffer takes more memory -- on both the client and server
-      side since server instantiates the passed write buffer to process
-      it -- but a larger buffer size reduces the number of RPCs made.
-      For an estimate of server-side memory-used, evaluate
-      hbase.client.write.buffer * hbase.regionserver.handler.count
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.port</name>
-    <value>60020</value>
-    <description>The port the HBase RegionServer binds to.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-    <description>The port for the HBase RegionServer web UI
-      Set to -1 if you do not want the RegionServer UI to run.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port.auto</name>
-    <value>false</value>
-    <description>Whether or not the Master or RegionServer
-      UI should search for a port to bind to. Enables automatic port
-      search if hbase.regionserver.info.port is already in use.
-      Useful for testing, turned off by default.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The address for the HBase RegionServer web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.class</name>
-    <value>org.apache.hadoop.hbase.ipc.HRegionInterface</value>
-    <description>The RegionServer interface to use.
-      Used by the client opening proxy to remote region server.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.pause</name>
-    <value>1000</value>
-    <description>General client pause value.  Used mostly as value to wait
-      before running a retry of a failed get, region lookup, etc.</description>
-  </property>
-  <property>
-    <name>hbase.client.retries.number</name>
-    <value>10</value>
-    <description>Maximum retries.  Used as maximum for all retryable
-      operations such as fetching of the root region from root region
-      server, getting a cell's value, starting a row update, etc.
-      Default: 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.bulkload.retries.number</name>
-    <value>0</value>
-    <description>Maximum retries.  This is maximum number of iterations
-      to atomic bulk loads are attempted in the face of splitting operations
-      0 means never give up.  Default: 0.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>1</value>
-    <description>Number of rows that will be fetched when calling next
-      on a scanner if it is not served from (local, client) memory. Higher
-      caching values will enable faster scanners but will eat up more memory
-      and some calls of next may take longer and longer times when the cache is empty.
-      Do not set this value such that the time between invocations is greater
-      than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-      instance. This is to set an upper boundary for a single entry saved in a
-      storage file. Since they cannot be split it helps avoiding that a region
-      cannot be split any further because the data is too large. It seems wise
-      to set this to a fraction of the maximum region size. Setting it to zero
-      or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.lease.period</name>
-    <value>60000</value>
-    <description>HRegion server lease period in milliseconds. Default is
-      60 seconds. Clients must report in within this period else they are
-      considered dead.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>10</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-      Same property is used by the Master for count of master handlers.
-      Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.msginterval</name>
-    <value>3000</value>
-    <description>Interval between messages from the RegionServer to Master
-      in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.optionallogflushinterval</name>
-    <value>1000</value>
-    <description>Sync the HLog to the HDFS after this interval if it has not
-      accumulated enough entries to trigger a sync. Default 1 second. Units:
-      milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.regionSplitLimit</name>
-    <value>2147483647</value>
-    <description>Limit for the number of regions after which no more region
-      splitting should take place. This is not a hard limit for the number of
-      regions but acts as a guideline for the regionserver to stop splitting after
-      a certain limit. Default is set to MAX_INT; i.e. do not block splitting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.logroll.period</name>
-    <value>3600000</value>
-    <description>Period at which we will roll the commit log regardless
-      of how many edits it has.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.logroll.errors.tolerated</name>
-    <value>2</value>
-    <description>The number of consecutive WAL close errors we will allow
-      before triggering a server abort.  A setting of 0 will cause the
-      region server to abort if closing the current WAL writer fails during
-      log rolling.  Even a small value (2 or 3) will allow a region server
-      to ride over transient HDFS errors.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.hlog.reader.impl</name>
-    <value>org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader</value>
-    <description>The HLog file reader implementation.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.hlog.writer.impl</name>
-    <value>org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogWriter</value>
-    <description>The HLog file writer implementation.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.nbreservationblocks</name>
-    <value>4</value>
-    <description>The number of resevoir blocks of memory release on
-      OOME so we can cleanup properly before server shutdown.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.dns.interface</name>
-    <value>default</value>
-    <description>The name of the Network Interface from which a ZooKeeper server
-      should report its IP address.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.dns.nameserver</name>
-    <value>default</value>
-    <description>The host name or IP address of the name server (DNS)
-      which a ZooKeeper server should use to determine the host name used by the
-      master for communication and display purposes.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.dns.interface</name>
-    <value>default</value>
-    <description>The name of the Network Interface from which a region server
-      should report its IP address.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.dns.nameserver</name>
-    <value>default</value>
-    <description>The host name or IP address of the name server (DNS)
-      which a region server should use to determine the host name used by the
-      master for communication and display purposes.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.dns.interface</name>
-    <value>default</value>
-    <description>The name of the Network Interface from which a master
-      should report its IP address.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.dns.nameserver</name>
-    <value>default</value>
-    <description>The host name or IP address of the name server (DNS)
-      which a master should use to determine the host name used
-      for communication and display purposes.
-    </description>
-  </property>
-  <property>
-    <name>hbase.balancer.period</name>
-    <value>300000</value>
-    <description>Period at which the region balancer runs in the Master.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regions.slop</name>
-    <value>0.2</value>
-    <description>Rebalance if any regionserver has average + (average * slop) regions.
-      Default is 20% slop.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.logcleaner.ttl</name>
-    <value>600000</value>
-    <description>Maximum time a HLog can stay in the .oldlogdir directory,
-      after which it will be cleaned by a Master thread.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.logcleaner.plugins</name>
-    <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value>
-    <description>A comma-separated list of LogCleanerDelegates invoked by
-      the LogsCleaner service. These WAL/HLog cleaners are called in order,
-      so put the HLog cleaner that prunes the most HLog files in front. To
-      implement your own LogCleanerDelegate, just put it in HBase's classpath
-      and add the fully qualified class name here. Always add the above
-      default log cleaners in the list.
-    </description>
-  </property>
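
  As a minimal sketch of the plugin wiring described above (the class name
  com.example.MyOldLogsPruner is hypothetical), an hbase-site.xml override
  might look like:

    <property>
      <name>hbase.master.logcleaner.plugins</name>
      <value>com.example.MyOldLogsPruner,org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value>
    </property>

  The custom cleaner is listed first because the cleaners are called in order
  and the one that prunes the most HLog files should go in front; the default
  TimeToLiveLogCleaner stays on the list, as the description requires.
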
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.35</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      Setting this value equal to hbase.regionserver.global.memstore.upperLimit
-      causes the minimum possible flushing to occur when updates are blocked
-      due to memstore limiting.
-    </description>
-  </property>
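
  As a worked example of how these two limits interact (the heap size is an
  assumption for illustration): on a 10 GB region server heap, the 0.4
  upperLimit blocks new updates once all memstores together reach 4 GB, and
  flushing then continues until they drop below the 0.35 lowerLimit, i.e.
  3.5 GB.
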
-  <property>
-    <name>hbase.server.thread.wakefrequency</name>
-    <value>10000</value>
-    <description>Time to sleep in between searches for work (in milliseconds).
-      Used as sleep interval by service threads such as log roller.
-    </description>
-  </property>
-  <property>
-    <name>hbase.server.versionfile.writeattempts</name>
-    <value>3</value>
-    <description>
-      How many times to retry writing a version file
-      before just aborting. Each attempt is separated by the
-      hbase.server.thread.wakefrequency milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-      Memstore will be flushed to disk if size of the memstore
-      exceeds this number of bytes.  Value is checked by a thread that runs
-      every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.preclose.flush.size</name>
-    <value>5242880</value>
-    <description>
-      If the memstores in a region are this size or larger when we go
-      to close, run a "pre-flush" to clear out memstores before we put up
-      the region closed flag and take the region offline.  On close,
-      a flush is run under the close flag to empty memory.  During
-      this time the region is offline and we are not taking on any writes.
-      If the memstore content is large, this flush could take a long time to
-      complete.  The preflush is meant to clean out the bulk of the memstore
-      before putting up the close flag and taking the region offline so the
-      flush that runs under the close flag has little to do.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>
-      Block updates if a memstore reaches hbase.hregion.memstore.block.multiplier
-      times hbase.hregion.memstore.flush.size bytes (with the defaults,
-      2 * 134217728 bytes = 256 MB).  Useful for preventing runaway memstore
-      growth during spikes in update traffic.  Without an upper bound, the
-      memstore fills such that when it flushes, the resultant flush files take
-      a long time to compact or split, or worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-      Maximum HStoreFile size. If any one of a column family's HStoreFiles has
-      grown to exceed this value, the hosting HRegion is split in two.
-      Default: 10G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-      If more than this number of HStoreFiles in any one HStore
-      (one HStoreFile is written per flush of memstore) then a compaction
-      is run to rewrite all HStoreFiles as one.  Larger numbers
-      put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>7</value>
-    <description>
-      If more than this number of StoreFiles in any one Store
-      (one StoreFile is written per flush of MemStore) then updates are
-      blocked for this HRegion until a compaction is completed, or
-      until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingWaitTime</name>
-    <value>90000</value>
-    <description>
-      The time an HRegion will block updates for after hitting the StoreFile
-      limit defined by hbase.hstore.blockingStoreFiles.
-      After this time has elapsed, the HRegion will stop blocking updates even
-      if a compaction has not been completed.  Default: 90 seconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compaction.max</name>
-    <value>10</value>
-    <description>Max number of HStoreFiles to compact per 'minor' compaction.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-      HStoreFiles in a region.  Default: 1 day.
-      Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  <property>
-    <name>hbase.mapreduce.hfileoutputformat.blocksize</name>
-    <value>65536</value>
-    <description>The mapreduce HFileOutputFormat writes storefiles/hfiles.
-      This is the minimum hfile blocksize to emit.  Usually in HBase, when
-      writing hfiles, the blocksize is taken from the table schema
-      (HColumnDescriptor), but in the mapreduce outputformat context we don't
-      have access to the schema, so we get the blocksize from the
-      Configuration.  The smaller you make the blocksize, the bigger your
-      index and the less you fetch on a random access.  Set the blocksize down
-      if you have small cells and want faster random access to individual cells.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.25</value>
-    <description>
-      Percentage of maximum heap (-Xmx setting) to allocate to block cache
-      used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-      Set to 0 to disable, but this is not recommended.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hash.type</name>
-    <value>murmur</value>
-    <description>The hashing algorithm for use in HashFunction. Two values are
-      supported now: murmur (MurmurHash) and jenkins (JenkinsHash).
-      Used by bloom filters.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.index.cacheonwrite</name>
-    <value>false</value>
-    <description>
-      This allows non-root multi-level index blocks to be put into the block
-      cache at the time the index is being written.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.checksum.verify</name>
-    <value>false</value>
-    <description>
-      Allow HBase to compute checksums itself rather than using HDFS checksums.
-      This is a backwards-incompatible change.
-    </description>
-  </property>
-  <property>
-    <name>hfile.index.block.max.size</name>
-    <value>131072</value>
-    <description>
-      When the size of a leaf-level, intermediate-level, or root-level
-      index block in a multi-level block index grows to this size, the
-      block is written out and a new block is started.
-    </description>
-  </property>
-  <property>
-    <name>hfile.format.version</name>
-    <value>2</value>
-    <description>
-      The HFile format version to use for new files. Set this to 1 to test
-      backwards-compatibility. The default value of this option should be
-      consistent with FixedFileTrailer.MAX_VERSION.
-    </description>
-  </property>
-  <property>
-    <name>io.storefile.bloom.block.size</name>
-    <value>131072</value>
-    <description>
-      The size in bytes of a single block ("chunk") of a compound Bloom
-      filter. This size is approximate, because Bloom blocks can only be
-      inserted at data block boundaries, and the number of keys per data
-      block varies.
-    </description>
-  </property>
-  <property>
-    <name>io.storefile.bloom.cacheonwrite</name>
-    <value>false</value>
-    <description>
-      Enables cache-on-write for inline blocks of a compound Bloom filter.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rs.cacheblocksonwrite</name>
-    <value>false</value>
-    <description>
-      Whether an HFile block should be added to the block cache when the
-      block is finished.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rpc.engine</name>
-    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
-    <description>Implementation of org.apache.hadoop.hbase.ipc.RpcEngine to be
-      used for client / server RPC call marshalling.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_h...@example.com".  The kerberos principal name
-      that should be used to run the HMaster process.  The principal name should
-      be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-      portion, it will be replaced with the actual hostname of the running
-      instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-      the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_h...@example.com".  The kerberos principal name
-      that should be used to run the HRegionServer process.  The principal name
-      should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-      hostname portion, it will be replaced with the actual hostname of the
-      running instance.  An entry for this principal must exist in the file
-      specified in hbase.regionserver.keytab.file.
-    </description>
-  </property>
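
  To make the keytab/principal pairing concrete (the keytab path and realm
  below are illustrative assumptions, not shipped defaults), a secured region
  server might be configured as:

    <property>
      <name>hbase.regionserver.keytab.file</name>
      <value>/etc/security/keytabs/hbase.keytab</value>
    </property>
    <property>
      <name>hbase.regionserver.kerberos.principal</name>
      <value>hbase/_HOST@EXAMPLE.COM</value>
    </property>

  Because "_HOST" appears in the hostname portion, each region server
  substitutes its own hostname at login, so one configuration file can be
  shared across the cluster.
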
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hadoop.policy.file</name>
-    <value>hbase-policy.xml</value>
-    <description>The policy configuration file used by RPC servers to make
-      authorization decisions on client requests.  Only used when HBase
-      security is enabled.
-    </description>
-  </property>
-  <property>
-    <name>hbase.superuser</name>
-    <value></value>
-    <description>List of users or groups (comma-separated), who are allowed
-      full privileges, regardless of stored ACLs, across the cluster.
-      Only used when HBase security is enabled.
-    </description>
-  </property>
-  <property>
-    <name>hbase.auth.key.update.interval</name>
-    <value>86400000</value>
-    <description>The update interval for master key for authentication tokens
-      in servers in milliseconds.  Only used when HBase security is enabled.
-    </description>
-  </property>
-  <property>
-    <name>hbase.auth.token.max.lifetime</name>
-    <value>604800000</value>
-    <description>The maximum lifetime in milliseconds after which an
-      authentication token expires.  Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>180000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client."  In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file paths are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.rootserver</name>
-    <value>root-region-server</value>
-    <description>Path to ZNode holding root region location. This is written by
-      the master and read by clients and region servers. If a relative path is
-      given, the parent folder will be ${zookeeper.znode.parent}. By default,
-      this means the root location is stored at /hbase/root-region-server.
-    </description>
-  </property>
-
-  <property>
-    <name>zookeeper.znode.acl.parent</name>
-    <value>acl</value>
-    <description>Root ZNode for access control lists.</description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-      default on all tables. For any overridden coprocessor method, these
-      classes will be called in order. After implementing your own Coprocessor,
-      just put it in HBase's classpath and add the fully qualified class name
-      here. A coprocessor can also be loaded on demand per table via its
-      HTableDescriptor.
-    </description>
-  </property>
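
  As a sketch of the registration step just described (the class name
  com.example.AuditRegionObserver is hypothetical), loading a coprocessor on
  all tables would look like:

    <property>
      <name>hbase.coprocessor.region.classes</name>
      <value>com.example.AuditRegionObserver</value>
    </property>

  The jar containing the class must already be on HBase's classpath; this
  property only tells the region server which classes to instantiate.
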
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-      For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-      By default this is set to localhost for local and pseudo-distributed modes
-      of operation. For a fully-distributed setup, this should be set to a full
-      list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in
-      hbase-env.sh, this is the list of servers which we will start/stop
-      ZooKeeper on.
-    </description>
-  </property>
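
  Using the example hosts from the description above, a fully-distributed
  setup would be:

    <property>
      <name>hbase.zookeeper.quorum</name>
      <value>host1.mydomain.com,host2.mydomain.com,host3.mydomain.com</value>
    </property>

  Combined with hbase.zookeeper.peerport (2888) and hbase.zookeeper.leaderport
  (3888) below, this yields the host:peer_port:leader_port quorum list that
  the comment above refers to.
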
-  <property>
-    <name>hbase.zookeeper.peerport</name>
-    <value>2888</value>
-    <description>Port used by ZooKeeper peers to talk to each other.
-      See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
-      for more information.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.leaderport</name>
-    <value>3888</value>
-    <description>Port used by ZooKeeper for leader election.
-      See http://hadoop.apache.org/zookeeper/docs/r3.1.1/zookeeperStarted.html#sc_RunningReplicatedZooKeeper
-      for more information.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update
-      functionality. This allows certain ZooKeeper operations to complete more
-      quickly and prevents some issues with rare ZooKeeper failure scenarios
-      (see the release note of HBASE-6710 for an example).
-      IMPORTANT: only set this to true if all ZooKeeper servers in the cluster
-      are on version 3.4+ and will not be downgraded.  ZooKeeper versions
-      before 3.4 do not support multi-update and will not fail gracefully if
-      multi-update is invoked (see ZOOKEEPER-1495).
-      NOTE: this and future versions of HBase are only supported with versions
-      of ZooKeeper that have multi support (CDH4+), so it is safe to use
-      ZK.multi.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <!--
-  Beginning of properties that are directly mapped from ZooKeeper's zoo.cfg.
-  All properties with an "hbase.zookeeper.property." prefix are converted for
-  ZooKeeper's configuration. Hence, if you want to add an option from zoo.cfg,
-  e.g.  "initLimit=10" you would append the following to your configuration:
-    <property>
-      <name>hbase.zookeeper.property.initLimit</name>
-      <value>10</value>
-    </property>
-  -->
-  <property>
-    <name>hbase.zookeeper.property.initLimit</name>
-    <value>10</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The number of ticks that the initial synchronization phase can take.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.syncLimit</name>
-    <value>5</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The number of ticks that can pass between sending a request and getting an
-      acknowledgment.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.dataDir</name>
-    <value>${hbase.tmp.dir}/zookeeper</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The directory where the snapshot is stored.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      The port at which the clients will connect.
-    </description>
-  </property>
-  <property>
-    <name>hbase.zookeeper.property.maxClientCnxns</name>
-    <value>300</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-      Limit on number of concurrent connections (at the socket level) that a
-      single client, identified by IP address, may make to a single member of
-      the ZooKeeper ensemble. Set this high to avoid ZooKeeper connection
-      issues when running standalone and pseudo-distributed.
-    </description>
-  </property>
-  <!-- End of properties that are directly mapped from ZooKeeper's zoo.cfg -->
-  <property>
-    <name>hbase.rest.port</name>
-    <value>8080</value>
-    <description>The port for the HBase REST server.</description>
-  </property>
-  <property>
-    <name>hbase.rest.readonly</name>
-    <value>false</value>
-    <description>
-      Defines the mode the REST server will be started in. Possible values are:
-      false: All HTTP methods are permitted - GET/PUT/POST/DELETE.
-      true: Only the GET method is permitted.
-    </description>
-  </property>
-
-  <property skipInDoc="true">
-    <name>hbase.defaults.for.version</name>
-    <value>0.94.2-cdh4.2.1</value>
-    <description>
-      This defaults file was compiled for version 0.94.2-cdh4.2.1. This
-      variable is used to make sure that a user doesn't have an old version
-      of hbase-default.xml on the classpath.
-    </description>
-  </property>
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-    <description>
-      Set to true to skip the 'hbase.defaults.for.version' check.
-      Setting this to true can be useful in contexts other than a maven
-      build; e.g., running in an IDE.  You'll want to set this boolean to
-      true to avoid seeing the RuntimeException complaint: "hbase-default.xml
-      file seems to be for and old version of HBase (0.94.2-cdh4.2.1), this
-      version is X.X.X-SNAPSHOT"
-    </description>
-  </property>
-  <property>
-    <name>hbase.coprocessor.abortonerror</name>
-    <value>false</value>
-    <description>
-      Set to true to cause the hosting server (master or regionserver) to
-      abort if a coprocessor throws a Throwable object that is not IOException
-      or a subclass of IOException. Setting it to true might be useful in
-      development environments where one wants to terminate the server as soon
-      as possible to simplify coprocessor failure analysis.
-    </description>
-  </property>
-  <property>
-    <name>hbase.online.schema.update.enable</name>
-    <value>false</value>
-    <description>
-      Set true to enable online schema changes.  This is an experimental
-      feature.  There are known issues modifying table schemas at the same
-      time a region split is happening, so your table needs to be quiescent
-      or else you have to be running with splits disabled.
-    </description>
-  </property>
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Does HDFS allow appends to files?
-      This is an HDFS config, set here so the HDFS client will enable append
-      support.  You must ensure that this config is also true server-side when
-      running HBase (you will have to restart your cluster after setting it).
-    </description>
-  </property>
-  <property>
-    <name>hbase.thrift.minWorkerThreads</name>
-    <value>16</value>
-    <description>
-      The "core size" of the thread pool. New threads are created on every
-      connection until this many threads are created.
-    </description>
-  </property>
-  <property>
-    <name>hbase.thrift.maxWorkerThreads</name>
-    <value>1000</value>
-    <description>
-      The maximum size of the thread pool. When the pending request queue
-      overflows, new threads are created until their number reaches this
-      limit.  After that, the server starts dropping connections.
-    </description>
-  </property>
-  <property>
-    <name>hbase.thrift.maxQueuedRequests</name>
-    <value>1000</value>
-    <description>
-      The maximum number of pending Thrift connections waiting in the queue.
-      If there are no idle threads in the pool, the server queues requests.
-      Only when the queue overflows are new threads added, up to
-      hbase.thrift.maxWorkerThreads threads.
-    </description>
-  </property>
-  <property>
-    <name>hbase.offheapcache.percentage</name>
-    <value>0</value>
-    <description>
-      The amount of off heap space to be allocated towards the experimental
-      off heap cache. If you desire the cache to be disabled, simply set this
-      value to 0.
-    </description>
-  </property>
-  <property>
-    <name>hbase.data.umask.enable</name>
-    <value>false</value>
-    <description>If true, file permissions will be assigned to the files
-      written by the regionserver
-    </description>
-  </property>
-  <property>
-    <name>hbase.data.umask</name>
-    <value>000</value>
-    <description>File permissions that should be used to write data
-      files when hbase.data.umask.enable is true
-    </description>
-  </property>
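
  For instance, to actually restrict permissions on regionserver-written
  files (the umask value 077 is an illustrative choice, not a default), the
  two properties would be set together:

    <property>
      <name>hbase.data.umask.enable</name>
      <value>true</value>
    </property>
    <property>
      <name>hbase.data.umask</name>
      <value>077</value>
    </property>
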
-
-  <property>
-    <name>hbase.metrics.showTableName</name>
-    <value>true</value>
-    <description>Whether to include the prefix "tbl.tablename" in per-column
-      family metrics.  If true, for each metric M, per-cf metrics will be
-      reported for tbl.T.cf.CF.M; if false, per-cf metrics will be aggregated
-      by column-family across tables, and reported for cf.CF.M.  In both
-      cases, the aggregated metric M across tables and cfs will be reported.
-    </description>
-  </property>
-  <property>
-    <name>hbase.table.archive.directory</name>
-    <value>.archive</value>
-    <description>Per-table directory name under which to backup files for a
-      table. Files are moved to the same directories as they would be under the
-      table directory, but instead are just one level lower (under
-      table/.archive/... rather than table/...). Currently only applies to
-      HFiles.</description>
-  </property>
-  <property>
-    <name>hbase.master.hfilecleaner.plugins</name>
-    <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value>
-    <description>A comma-separated list of HFileCleanerDelegates invoked by
-      the HFileCleaner service. These HFile cleaners are called in order,
-      so put the cleaner that prunes the most files in front. To
-      implement your own HFileCleanerDelegate, just put it in HBase's classpath
-      and add the fully qualified class name here. Always keep the above
-      default hfile cleaners in the list, as this value is overwritten in
-      hbase-site.xml.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rest.threads.max</name>
-    <value>100</value>
-    <description>
-      The maximum number of threads of the REST server thread pool.
-      Threads in the pool are reused to process REST requests. This
-      controls the maximum number of requests processed concurrently.
-      It may help to control the memory used by the REST server to
-      avoid OOM issues. If the thread pool is full, incoming requests
-      will be queued up to wait for free threads. The default
-      is 100.
-    </description>
-  </property>
-  <property>
-    <name>hbase.rest.threads.min</name>
-    <value>2</value>
-    <description>
-      The minimum number of threads of the REST server thread pool.
-      The thread pool always has at least this number of threads so
-      the REST server is ready to serve incoming requests. The default
-      is 2.
-    </description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-eagle/blob/0ea130ef/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
----------------------------------------------------------------------
diff --git 
a/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
 
b/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
deleted file mode 100644
index 87fffcd..0000000
--- 
a/eagle-core/eagle-embed/eagle-embed-hbase/src/test/java/org/apache/eagle/service/hbase/TestHBaseBase.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.eagle.service.hbase;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestHBaseBase {
-    protected static EmbeddedHbase hbase;
-
-    @BeforeClass
-    public static void setUpHBase() {
-        hbase = EmbeddedHbase.getInstance();
-    }
-
-    @Test
-    public void test() {
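-        // Intentionally empty: the test passes if the embedded HBase
-        // cluster obtained in setUpHBase() started without throwing.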
-
-    }
-
-}
\ No newline at end of file
