This is an automated email from the ASF dual-hosted git repository.

madhan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/atlas.git


The following commit(s) were added to refs/heads/master by this push:
     new 80b305f  ATLAS-4188: Docker setup updated to run Atlas, Solr, Kafka, 
HBase, HDFS, Zookeeper in separate containers
80b305f is described below

commit 80b305f827092f5e006d556aba3e02acee052c2d
Author: Madhan Neethiraj <[email protected]>
AuthorDate: Fri Mar 5 21:09:23 2021 -0800

    ATLAS-4188: Docker setup updated to run Atlas, Solr, Kafka, HBase, HDFS, 
Zookeeper in separate containers
---
 dev-support/atlas-docker/.dockerignore             |   1 +
 dev-support/atlas-docker/.env                      |   5 +-
 dev-support/atlas-docker/Dockerfile.atlas          |  14 +-
 dev-support/atlas-docker/Dockerfile.atlas-base     |  10 +-
 dev-support/atlas-docker/Dockerfile.atlas-hadoop   |  43 ++
 .../{Dockerfile.atlas => Dockerfile.atlas-hbase}   |  32 +-
 .../{Dockerfile.atlas => Dockerfile.atlas-kafka}   |  28 +-
 .../{scripts/atlas.sh => Dockerfile.atlas-solr}    |  33 +-
 .../{scripts/atlas.sh => Dockerfile.atlas-zk}      |  28 +-
 dev-support/atlas-docker/README.md                 |  44 +-
 dev-support/atlas-docker/config/solr/currency.xml  |  67 +++
 .../atlas-docker/config/solr/lang/stopwords_en.txt |  54 ++
 dev-support/atlas-docker/config/solr/protwords.txt |  21 +
 dev-support/atlas-docker/config/solr/schema.xml    | 534 +++++++++++++++++
 .../atlas-docker/config/solr/solrconfig.xml        | 630 +++++++++++++++++++++
 dev-support/atlas-docker/config/solr/stopwords.txt |  14 +
 dev-support/atlas-docker/config/solr/synonyms.txt  |  29 +
 .../atlas-docker/docker-compose.atlas-hadoop.yml   |  31 +
 .../atlas-docker/docker-compose.atlas-hbase.yml    |  30 +
 .../atlas-docker/docker-compose.atlas-kafka.yml    |  27 +
 dev-support/atlas-docker/docker-compose.atlas.yml  |  33 +-
 dev-support/atlas-docker/download-archives.sh      |  49 ++
 dev-support/atlas-docker/downloads/.gitignore      |   1 +
 .../scripts/{atlas.sh => atlas-hadoop-mkdir.sh}    |  34 +-
 .../atlas-docker/scripts/atlas-hadoop-setup.sh     |  60 ++
 dev-support/atlas-docker/scripts/atlas-hadoop.sh   |  54 ++
 .../scripts/{atlas.sh => atlas-hbase-setup.sh}     |  31 +-
 .../scripts/{atlas.sh => atlas-hbase.sh}           |  28 +-
 .../scripts/{atlas.sh => atlas-kafka-setup.sh}     |  30 +-
 .../scripts/{atlas.sh => atlas-kafka.sh}           |  27 +-
 .../atlas-docker/scripts/atlas-solr-create.sh      |   6 +
 dev-support/atlas-docker/scripts/atlas.sh          |  11 +
 dev-support/atlas-docker/scripts/hbase-site.xml    |  52 ++
 distro/pom.xml                                     |   2 -
 pom.xml                                            |   2 +-
 webapp/pom.xml                                     |  18 +-
 36 files changed, 1877 insertions(+), 236 deletions(-)

diff --git a/dev-support/atlas-docker/.dockerignore 
b/dev-support/atlas-docker/.dockerignore
index d6a8480..fa4ce32 100644
--- a/dev-support/atlas-docker/.dockerignore
+++ b/dev-support/atlas-docker/.dockerignore
@@ -1,4 +1,5 @@
 *
 !config
 !dist/apache-atlas-*-bin.tar.gz
+!downloads/*
 !scripts/*
diff --git a/dev-support/atlas-docker/.env b/dev-support/atlas-docker/.env
index 19443ad..79d2b97 100644
--- a/dev-support/atlas-docker/.env
+++ b/dev-support/atlas-docker/.env
@@ -2,6 +2,9 @@ BUILD_HOST_SRC=true
 SKIPTESTS=true
 GIT_URL=https://github.com/apache/atlas.git
 BRANCH=master
-PROFILE=dist,berkeley-solr
+PROFILE=dist,external-hbase-solr
 
 ATLAS_VERSION=3.0.0-SNAPSHOT
+HADOOP_VERSION=3.3.0
+HBASE_VERSION=2.3.3
+KAFKA_VERSION=2.5.0
diff --git a/dev-support/atlas-docker/Dockerfile.atlas 
b/dev-support/atlas-docker/Dockerfile.atlas
index 76e5c59..4bafe80 100644
--- a/dev-support/atlas-docker/Dockerfile.atlas
+++ b/dev-support/atlas-docker/Dockerfile.atlas
@@ -19,19 +19,21 @@ FROM atlas-base:latest
 ARG ATLAS_VERSION
 
 COPY ./scripts/atlas.sh                              ${ATLAS_SCRIPTS}/
-COPY ./dist/apache-atlas-${ATLAS_VERSION}-bin.tar.gz /tmp/
+COPY ./dist/apache-atlas-${ATLAS_VERSION}-bin.tar.gz /home/atlas/dist/
 
-RUN tar xfz /tmp/apache-atlas-${ATLAS_VERSION}-bin.tar.gz --directory=/opt/ && 
\
+RUN tar xfz /home/atlas/dist/apache-atlas-${ATLAS_VERSION}-bin.tar.gz 
--directory=/opt/ && \
     ln -s /opt/apache-atlas-${ATLAS_VERSION} ${ATLAS_HOME} && \
-    rm -f /tmp/apache-atlas-${ATLAS_VERSION}-bin.tar.gz && \
-    mkdir -p /var/run/atlas && \
-    mkdir -p /var/log/atlas && \
-    mkdir -p /home/atlas/data && \
+    rm -f /home/atlas/dist/apache-atlas-${ATLAS_VERSION}-bin.tar.gz && \
+    mkdir -p /var/run/atlas /var/log/atlas /home/atlas/data 
${ATLAS_HOME}/hbase/conf && \
     rm -rf ${ATLAS_HOME}/logs && \
     ln -s /var/log/atlas ${ATLAS_HOME}/logs && \
     ln -s /home/atlas/data ${ATLAS_HOME}/data && \
     chown -R atlas:atlas ${ATLAS_HOME}/ /var/run/atlas/ /var/log/atlas/
 
+COPY ./scripts/hbase-site.xml ${ATLAS_HOME}/hbase/conf/
+
 VOLUME /home/atlas/data
 
+EXPOSE 21000
+
 ENTRYPOINT [ "/home/atlas/scripts/atlas.sh" ]
diff --git a/dev-support/atlas-docker/Dockerfile.atlas-base 
b/dev-support/atlas-docker/Dockerfile.atlas-base
index 36e8f0f..e68daa6 100644
--- a/dev-support/atlas-docker/Dockerfile.atlas-base
+++ b/dev-support/atlas-docker/Dockerfile.atlas-base
@@ -17,9 +17,9 @@
 FROM ubuntu:20.04
 
 
-# Install curl, wget, tzdata, Python, Java
+# Install tzdata, Python, Java
 RUN apt-get update && \
-    DEBIAN_FRONTEND="noninteractive" apt-get -y install curl wget tzdata \
+    DEBIAN_FRONTEND="noninteractive" apt-get -y install tzdata \
     python python3 python3-pip openjdk-8-jdk bc iputils-ping ssh pdsh
 
 # Set environment variables
@@ -33,6 +33,12 @@ ENV PATH          
/usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bi
 # setup groups, users, directories
 RUN groupadd atlas && \
     useradd -g atlas -ms /bin/bash atlas && \
+    groupadd hadoop && \
+    useradd -g hadoop -ms /bin/bash hdfs && \
+    useradd -g hadoop -ms /bin/bash yarn && \
+    useradd -g hadoop -ms /bin/bash hive && \
+    useradd -g hadoop -ms /bin/bash hbase && \
+    useradd -g hadoop -ms /bin/bash kafka && \
     mkdir -p /home/atlas/dist && \
     mkdir -p /home/atlas/scripts && \
     chown -R atlas:atlas /home/atlas
diff --git a/dev-support/atlas-docker/Dockerfile.atlas-hadoop 
b/dev-support/atlas-docker/Dockerfile.atlas-hadoop
new file mode 100644
index 0000000..26e766e
--- /dev/null
+++ b/dev-support/atlas-docker/Dockerfile.atlas-hadoop
@@ -0,0 +1,43 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM atlas-base:latest
+
+ARG HADOOP_VERSION
+
+
+COPY ./downloads/hadoop-${HADOOP_VERSION}.tar.gz /home/atlas/dist/
+
+COPY ./scripts/atlas-hadoop-setup.sh             /home/atlas/scripts/
+COPY ./scripts/atlas-hadoop.sh                   /home/atlas/scripts/
+COPY ./scripts/atlas-hadoop-mkdir.sh             /home/atlas/scripts/
+
+RUN tar xvfz /home/atlas/dist/hadoop-${HADOOP_VERSION}.tar.gz 
--directory=/opt/ && \
+    ln -s /opt/hadoop-${HADOOP_VERSION} /opt/hadoop && \
+    rm -f /home/atlas/dist/hadoop-${HADOOP_VERSION}.tar.gz
+
+ENV HADOOP_HOME        /opt/hadoop
+ENV HADOOP_CONF_DIR    /opt/hadoop/etc/hadoop
+ENV HADOOP_HDFS_HOME   /opt/hadoop
+ENV HADOOP_MAPRED_HOME /opt/hadoop
+ENV HADOOP_COMMON_HOME /opt/hadoop
+ENV YARN_HOME          /opt/hadoop
+ENV PATH               
/usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/hadoop/bin
+
+EXPOSE 9000
+EXPOSE 8088
+
+ENTRYPOINT [ "/home/atlas/scripts/atlas-hadoop.sh" ]
diff --git a/dev-support/atlas-docker/Dockerfile.atlas 
b/dev-support/atlas-docker/Dockerfile.atlas-hbase
similarity index 52%
copy from dev-support/atlas-docker/Dockerfile.atlas
copy to dev-support/atlas-docker/Dockerfile.atlas-hbase
index 76e5c59..1c5fa28 100644
--- a/dev-support/atlas-docker/Dockerfile.atlas
+++ b/dev-support/atlas-docker/Dockerfile.atlas-hbase
@@ -17,21 +17,25 @@
 FROM atlas-base:latest
 
 ARG ATLAS_VERSION
+ARG HBASE_VERSION
 
-COPY ./scripts/atlas.sh                              ${ATLAS_SCRIPTS}/
-COPY ./dist/apache-atlas-${ATLAS_VERSION}-bin.tar.gz /tmp/
 
-RUN tar xfz /tmp/apache-atlas-${ATLAS_VERSION}-bin.tar.gz --directory=/opt/ && 
\
-    ln -s /opt/apache-atlas-${ATLAS_VERSION} ${ATLAS_HOME} && \
-    rm -f /tmp/apache-atlas-${ATLAS_VERSION}-bin.tar.gz && \
-    mkdir -p /var/run/atlas && \
-    mkdir -p /var/log/atlas && \
-    mkdir -p /home/atlas/data && \
-    rm -rf ${ATLAS_HOME}/logs && \
-    ln -s /var/log/atlas ${ATLAS_HOME}/logs && \
-    ln -s /home/atlas/data ${ATLAS_HOME}/data && \
-    chown -R atlas:atlas ${ATLAS_HOME}/ /var/run/atlas/ /var/log/atlas/
+COPY ./downloads/hbase-${HBASE_VERSION}-bin.tar.gz     /home/atlas/dist/
 
-VOLUME /home/atlas/data
+COPY ./scripts/atlas-hbase-setup.sh                    /home/atlas/scripts/
+COPY ./scripts/atlas-hbase.sh                          /home/atlas/scripts/
+COPY ./scripts/hbase-site.xml                          /home/atlas/scripts/
 
-ENTRYPOINT [ "/home/atlas/scripts/atlas.sh" ]
+RUN tar xvfz /home/atlas/dist/hbase-${HBASE_VERSION}-bin.tar.gz 
--directory=/opt/ && \
+    ln -s /opt/hbase-${HBASE_VERSION} /opt/hbase && \
+    rm -f /home/atlas/dist/hbase-${HBASE_VERSION}-bin.tar.gz
+
+ENV HBASE_HOME /opt/hbase
+ENV PATH       
/usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/hbase/bin
+
+EXPOSE 16000
+EXPOSE 16010
+EXPOSE 16020
+EXPOSE 16030
+
+ENTRYPOINT [ "/home/atlas/scripts/atlas-hbase.sh" ]
diff --git a/dev-support/atlas-docker/Dockerfile.atlas 
b/dev-support/atlas-docker/Dockerfile.atlas-kafka
similarity index 53%
copy from dev-support/atlas-docker/Dockerfile.atlas
copy to dev-support/atlas-docker/Dockerfile.atlas-kafka
index 76e5c59..e06d628 100644
--- a/dev-support/atlas-docker/Dockerfile.atlas
+++ b/dev-support/atlas-docker/Dockerfile.atlas-kafka
@@ -17,21 +17,21 @@
 FROM atlas-base:latest
 
 ARG ATLAS_VERSION
+ARG KAFKA_VERSION
 
-COPY ./scripts/atlas.sh                              ${ATLAS_SCRIPTS}/
-COPY ./dist/apache-atlas-${ATLAS_VERSION}-bin.tar.gz /tmp/
 
-RUN tar xfz /tmp/apache-atlas-${ATLAS_VERSION}-bin.tar.gz --directory=/opt/ && 
\
-    ln -s /opt/apache-atlas-${ATLAS_VERSION} ${ATLAS_HOME} && \
-    rm -f /tmp/apache-atlas-${ATLAS_VERSION}-bin.tar.gz && \
-    mkdir -p /var/run/atlas && \
-    mkdir -p /var/log/atlas && \
-    mkdir -p /home/atlas/data && \
-    rm -rf ${ATLAS_HOME}/logs && \
-    ln -s /var/log/atlas ${ATLAS_HOME}/logs && \
-    ln -s /home/atlas/data ${ATLAS_HOME}/data && \
-    chown -R atlas:atlas ${ATLAS_HOME}/ /var/run/atlas/ /var/log/atlas/
+COPY ./downloads/kafka_2.12-${KAFKA_VERSION}.tgz         /home/atlas/dist/
 
-VOLUME /home/atlas/data
+COPY ./scripts/atlas-kafka-setup.sh                     /home/atlas/scripts/
+COPY ./scripts/atlas-kafka.sh                           /home/atlas/scripts/
 
-ENTRYPOINT [ "/home/atlas/scripts/atlas.sh" ]
+RUN tar xvfz /home/atlas/dist/kafka_2.12-${KAFKA_VERSION}.tgz 
--directory=/opt/ && \
+    ln -s /opt/kafka_2.12-${KAFKA_VERSION} /opt/kafka && \
+    rm -f /home/atlas/dist/kafka_2.12-${KAFKA_VERSION}.tgz
+
+ENV KAFKA_HOME /opt/kafka
+ENV PATH       
/usr/java/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kafka/bin
+
+EXPOSE 9092
+
+ENTRYPOINT [ "/home/atlas/scripts/atlas-kafka.sh" ]
diff --git a/dev-support/atlas-docker/scripts/atlas.sh 
b/dev-support/atlas-docker/Dockerfile.atlas-solr
old mode 100755
new mode 100644
similarity index 56%
copy from dev-support/atlas-docker/scripts/atlas.sh
copy to dev-support/atlas-docker/Dockerfile.atlas-solr
index 7abb6f7..17c14bb
--- a/dev-support/atlas-docker/scripts/atlas.sh
+++ b/dev-support/atlas-docker/Dockerfile.atlas-solr
@@ -1,5 +1,3 @@
-#!/bin/bash
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -16,28 +14,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -x
-
-if [ ! -e ${ATLAS_HOME}/.setupDone ]
-then
-  SETUP_ATLAS=true
-else
-  SETUP_ATLAS=false
-fi
-
-if [ "${SETUP_ATLAS}" == "true" ]
-then
-  encryptedPwd=$(${ATLAS_HOME}/bin/cputil.py -g -u admin -p atlasR0cks! -s)
+FROM solr:8
 
-  echo "admin=ADMIN::${encryptedPwd}" > 
${ATLAS_HOME}/conf/users-credentials.properties
+USER 0
 
-  chown -R atlas:atlas ${ATLAS_HOME}/
+# Copy Atlas collections config set
+RUN  mkdir -p /opt/solr/server/solr/configsets/atlas/conf/lang
+COPY ./config/solr/*.*       /opt/solr/server/solr/configsets/atlas/conf/
+COPY ./config/solr/lang/*.* /opt/solr/server/solr/configsets/atlas/conf/lang/
+RUN chown -R solr:solr /opt/solr/server/solr/configsets/atlas/
 
-  touch ${ATLAS_HOME}/.setupDone
-fi
+# Copy script that creates Atlas collections
+COPY ./scripts/atlas-solr-create.sh /docker-entrypoint-initdb.d/
 
-su -c "cd ${ATLAS_HOME}/bin && ./atlas_start.py" atlas
-ATLAS_PID=`ps -ef  | grep -v grep | grep -i "org.apache.atlas.Atlas" | awk 
'{print $2}'`
+EXPOSE 8983
 
-# prevent the container from exiting
-tail --pid=$ATLAS_PID -f /dev/null
+USER solr
diff --git a/dev-support/atlas-docker/scripts/atlas.sh 
b/dev-support/atlas-docker/Dockerfile.atlas-zk
old mode 100755
new mode 100644
similarity index 56%
copy from dev-support/atlas-docker/scripts/atlas.sh
copy to dev-support/atlas-docker/Dockerfile.atlas-zk
index 7abb6f7..2855b50
--- a/dev-support/atlas-docker/scripts/atlas.sh
+++ b/dev-support/atlas-docker/Dockerfile.atlas-zk
@@ -1,5 +1,3 @@
-#!/bin/bash
-
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -16,28 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -x
-
-if [ ! -e ${ATLAS_HOME}/.setupDone ]
-then
-  SETUP_ATLAS=true
-else
-  SETUP_ATLAS=false
-fi
-
-if [ "${SETUP_ATLAS}" == "true" ]
-then
-  encryptedPwd=$(${ATLAS_HOME}/bin/cputil.py -g -u admin -p atlasR0cks! -s)
-
-  echo "admin=ADMIN::${encryptedPwd}" > 
${ATLAS_HOME}/conf/users-credentials.properties
-
-  chown -R atlas:atlas ${ATLAS_HOME}/
-
-  touch ${ATLAS_HOME}/.setupDone
-fi
-
-su -c "cd ${ATLAS_HOME}/bin && ./atlas_start.py" atlas
-ATLAS_PID=`ps -ef  | grep -v grep | grep -i "org.apache.atlas.Atlas" | awk 
'{print $2}'`
-
-# prevent the container from exiting
-tail --pid=$ATLAS_PID -f /dev/null
+FROM zookeeper:3.5.9
diff --git a/dev-support/atlas-docker/README.md 
b/dev-support/atlas-docker/README.md
index 37caa3a..6ba4058 100644
--- a/dev-support/atlas-docker/README.md
+++ b/dev-support/atlas-docker/README.md
@@ -23,54 +23,28 @@ Docker files in this folder create docker images and run 
them to build Apache At
 
 ## Usage
 
-1. Ensure that you have recent version of Docker installed from 
[docker.io](http://www.docker.io) (as of this writing: Engine 19.03, Compose 
1.26.2).
+1. Ensure that you have recent version of Docker installed from 
[docker.io](http://www.docker.io) (as of this writing: Engine 20.10.5, Compose 
1.28.5).
+   Make sure to configure docker with at least 6gb of memory.
 
 2. Set this folder as your working directory.
 
 3. Update environment variables in .env file, if necessary
 
-4. Using docker-compose is the simpler way to build and deploy Apache Atlas in 
containers.
+4. Execute following command to download necessary archives to setup 
Atlas/HDFS/HBase/Kafka services:
+     ./download-archives.sh
 
-   4.1. Execute following command to build Apache Atlas:
+5. Build and deploy Apache Atlas in containers using docker-compose
+
+   5.1. Execute following command to build Apache Atlas:
 
         docker-compose -f docker-compose.atlas-base.yml -f 
docker-compose.atlas-build.yml up
 
    Time taken to complete the build might vary (upto an hour), depending on 
status of ${HOME}/.m2 directory cache.
 
-   4.2. Execute following command to install and start Atlas in a container:
+   5.2. Execute following command to install and start Atlas and dependent 
services (Solr, HBase, Kafka) in containers:
 
-        docker-compose -f docker-compose.atlas-base.yml -f 
docker-compose.atlas.yml up -d
+        docker-compose -f docker-compose.atlas-base.yml -f 
docker-compose.atlas.yml -f docker-compose.atlas-hadoop.yml -f 
docker-compose.atlas-hbase.yml -f docker-compose.atlas-kafka.yml up -d
 
    Apache Atlas will be installed at /opt/atlas/, and logs are at 
/var/logs/atlas directory.
 
-5. Alternatively docker command can be used to build and deploy Apache Atlas.
-
-   5.1. Execute following command to build Docker image **atlas-base**:
-
-        docker build -f Dockerfile.atlas-base -t atlas-base .
-
-   This might take about 10 minutes to complete.
-
-   5.2. Execute following command to build Docker image **atlas-build**:
-
-        docker build -f Dockerfile.atlas-build -t atlas-build .
-
-   5.3. Build Apache Atlas in a container with one of the following commands:
-
-        docker run -it --rm -v ${HOME}/.m2:/home/atlas/.m2:delegated -v 
$(pwd)/scripts:/home/atlas/scripts -v $(pwd)/../..:/home/atlas/src:delegated -v 
$(pwd)/patches:/home/atlas/patches -v $(pwd)/dist:/home/atlas/dist --env-file 
./.env atlas-build
-
-   Time taken to complete the build might vary (upto an hour), depending on 
status of ${HOME}/.m2 directory cache.
-
-   5.4. Execute following command to build Docker image **atlas**:
-
-        docker build -f Dockerfile.atlas --build-arg 
ATLAS_VERSION=3.0.0-SNAPSHOT -t atlas .
-
-   This might take about 10 minutes to complete.
-
-   5.5. Execute following command to install and run Atlas services in a 
container:
-
-        docker run -it -d --name atlas --hostname atlas.example.com -p 
21000:21000 -v $(pwd)/data:/home/atlas/data atlas
-
-   This might take few minutes to complete.
-
 6. Atlas Admin can be accessed at http://localhost:21000 (admin/atlasR0cks!)
diff --git a/dev-support/atlas-docker/config/solr/currency.xml 
b/dev-support/atlas-docker/config/solr/currency.xml
new file mode 100644
index 0000000..3a9c58a
--- /dev/null
+++ b/dev-support/atlas-docker/config/solr/currency.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Example exchange rates file for CurrencyField type named "currency" in 
example schema -->
+
+<currencyConfig version="1.0">
+  <rates>
+    <!-- Updated from http://www.exchangerate.com/ at 2011-09-27 -->
+    <rate from="USD" to="ARS" rate="4.333871" comment="ARGENTINA Peso" />
+    <rate from="USD" to="AUD" rate="1.025768" comment="AUSTRALIA Dollar" />
+    <rate from="USD" to="EUR" rate="0.743676" comment="European Euro" />
+    <rate from="USD" to="BRL" rate="1.881093" comment="BRAZIL Real" />
+    <rate from="USD" to="CAD" rate="1.030815" comment="CANADA Dollar" />
+    <rate from="USD" to="CLP" rate="519.0996" comment="CHILE Peso" />
+    <rate from="USD" to="CNY" rate="6.387310" comment="CHINA Yuan" />
+    <rate from="USD" to="CZK" rate="18.47134" comment="CZECH REP. Koruna" />
+    <rate from="USD" to="DKK" rate="5.515436" comment="DENMARK Krone" />
+    <rate from="USD" to="HKD" rate="7.801922" comment="HONG KONG Dollar" />
+    <rate from="USD" to="HUF" rate="215.6169" comment="HUNGARY Forint" />
+    <rate from="USD" to="ISK" rate="118.1280" comment="ICELAND Krona" />
+    <rate from="USD" to="INR" rate="49.49088" comment="INDIA Rupee" />
+    <rate from="USD" to="XDR" rate="0.641358" comment="INTNL MON. FUND SDR" />
+    <rate from="USD" to="ILS" rate="3.709739" comment="ISRAEL Sheqel" />
+    <rate from="USD" to="JPY" rate="76.32419" comment="JAPAN Yen" />
+    <rate from="USD" to="KRW" rate="1169.173" comment="KOREA (SOUTH) Won" />
+    <rate from="USD" to="KWD" rate="0.275142" comment="KUWAIT Dinar" />
+    <rate from="USD" to="MXN" rate="13.85895" comment="MEXICO Peso" />
+    <rate from="USD" to="NZD" rate="1.285159" comment="NEW ZEALAND Dollar" />
+    <rate from="USD" to="NOK" rate="5.859035" comment="NORWAY Krone" />
+    <rate from="USD" to="PKR" rate="87.57007" comment="PAKISTAN Rupee" />
+    <rate from="USD" to="PEN" rate="2.730683" comment="PERU Sol" />
+    <rate from="USD" to="PHP" rate="43.62039" comment="PHILIPPINES Peso" />
+    <rate from="USD" to="PLN" rate="3.310139" comment="POLAND Zloty" />
+    <rate from="USD" to="RON" rate="3.100932" comment="ROMANIA Leu" />
+    <rate from="USD" to="RUB" rate="32.14663" comment="RUSSIA Ruble" />
+    <rate from="USD" to="SAR" rate="3.750465" comment="SAUDI ARABIA Riyal" />
+    <rate from="USD" to="SGD" rate="1.299352" comment="SINGAPORE Dollar" />
+    <rate from="USD" to="ZAR" rate="8.329761" comment="SOUTH AFRICA Rand" />
+    <rate from="USD" to="SEK" rate="6.883442" comment="SWEDEN Krona" />
+    <rate from="USD" to="CHF" rate="0.906035" comment="SWITZERLAND Franc" />
+    <rate from="USD" to="TWD" rate="30.40283" comment="TAIWAN Dollar" />
+    <rate from="USD" to="THB" rate="30.89487" comment="THAILAND Baht" />
+    <rate from="USD" to="AED" rate="3.672955" comment="U.A.E. Dirham" />
+    <rate from="USD" to="UAH" rate="7.988582" comment="UKRAINE Hryvnia" />
+    <rate from="USD" to="GBP" rate="0.647910" comment="UNITED KINGDOM Pound" />
+    
+    <!-- Cross-rates for some common currencies -->
+    <rate from="EUR" to="GBP" rate="0.869914" />  
+    <rate from="EUR" to="NOK" rate="7.800095" />  
+    <rate from="GBP" to="NOK" rate="8.966508" />  
+  </rates>
+</currencyConfig>
diff --git a/dev-support/atlas-docker/config/solr/lang/stopwords_en.txt 
b/dev-support/atlas-docker/config/solr/lang/stopwords_en.txt
new file mode 100644
index 0000000..2c164c0
--- /dev/null
+++ b/dev-support/atlas-docker/config/solr/lang/stopwords_en.txt
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# a couple of test stopwords to test that the words are really being
+# configured from this file:
+stopworda
+stopwordb
+
+# Standard english stop words taken from Lucene's StopAnalyzer
+a
+an
+and
+are
+as
+at
+be
+but
+by
+for
+if
+in
+into
+is
+it
+no
+not
+of
+on
+or
+such
+that
+the
+their
+then
+there
+these
+they
+this
+to
+was
+will
+with
diff --git a/dev-support/atlas-docker/config/solr/protwords.txt 
b/dev-support/atlas-docker/config/solr/protwords.txt
new file mode 100644
index 0000000..1dfc0ab
--- /dev/null
+++ b/dev-support/atlas-docker/config/solr/protwords.txt
@@ -0,0 +1,21 @@
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#-----------------------------------------------------------------------
+# Use a protected word file to protect against the stemmer reducing two
+# unrelated words to the same base word.
+
+# Some non-words that normally won't be encountered,
+# just to test that they won't be stemmed.
+dontstems
+zwhacky
+
diff --git a/dev-support/atlas-docker/config/solr/schema.xml 
b/dev-support/atlas-docker/config/solr/schema.xml
new file mode 100644
index 0000000..7c2ad3c
--- /dev/null
+++ b/dev-support/atlas-docker/config/solr/schema.xml
@@ -0,0 +1,534 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--  
+ This is the Solr schema file. This file should be named "schema.xml" and
+ should be in the conf directory under the solr home
+ (i.e. ./solr/conf/schema.xml by default) 
+ or located where the classloader for the Solr webapp can find it.
+
+ This example schema is the recommended starting point for users.
+ It should be kept correct and concise, usable out-of-the-box.
+
+ For more information, on how to customize this file, please see
+ http://wiki.apache.org/solr/SchemaXml
+-->
+
+<schema name="titan-schema" version="1.5">
+  <!-- attribute "name" is the name of this schema and is only used for 
display purposes.
+       version="x.y" is Solr's version number for the schema syntax and 
+       semantics.  It should not normally be changed by applications.
+
+       1.0: multiValued attribute did not exist, all fields are multiValued 
+            by nature
+       1.1: multiValued attribute introduced, false by default 
+       1.2: omitTermFreqAndPositions attribute introduced, true by default 
+            except for text fields.
+       1.3: removed optional field compress feature
+       1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser
+            behavior when a single string produces multiple tokens.  Defaults 
+            to off for version >= 1.4
+       1.5: omitNorms defaults to true for primitive field types 
+            (int, float, boolean, string...)
+     -->
+
+
+   <!-- Valid attributes for fields:
+     name: mandatory - the name for the field
+     type: mandatory - the name of a field type from the 
+       <types> fieldType section
+     indexed: true if this field should be indexed (searchable or sortable)
+     stored: true if this field should be retrievable
+     docValues: true if this field should have doc values. Doc values are
+       useful for faceting, grouping, sorting and function queries. Although 
not
+       required, doc values will make the index faster to load, more
+       NRT-friendly and more memory-efficient. They however come with some
+       limitations: they are currently only supported by StrField, UUIDField
+       and all Trie*Fields, and depending on the field type, they might
+       require the field to be single-valued, be required or have a default
+       value (check the documentation of the field type you're interested in
+       for more information)
+     multiValued: true if this field may contain multiple values per document
+     omitNorms: (expert) set to true to omit the norms associated with
+       this field (this disables length normalization and index-time
+       boosting for the field, and saves some memory).  Only full-text
+       fields or fields that need an index-time boost need norms.
+       Norms are omitted for primitive (non-analyzed) types by default.
+     termVectors: [false] set to true to store the term vector for a
+       given field.
+       When using MoreLikeThis, fields used for similarity should be
+       stored for best performance.
+     termPositions: Store position information with the term vector.  
+       This will increase storage costs.
+     termOffsets: Store offset information with the term vector. This 
+       will increase storage costs.
+     required: The field is required.  It will throw an error if the
+       value does not exist
+     default: a value that should be used if no value is specified
+       when adding a document.
+   -->
+
+   <!-- field names should consist of alphanumeric or underscore characters 
only and
+      not start with a digit.  This is not currently strictly enforced,
+      but other field names will not have first class support from all 
components
+      and back compatibility is not guaranteed.  Names with both leading and
+      trailing underscores (e.g. _version_) are reserved.
+   -->
+
+   <!-- If you remove this field, you must _also_ disable the update log in 
solrconfig.xml
+      or Solr won't start. _version_ and update log are required for SolrCloud
+   --> 
+   <field name="_version_" type="long" indexed="true" stored="true"/>
+   
+   <!-- points to the root document of a block of nested documents. Required 
for nested
+      document support, may be removed otherwise
+   -->
+   <field name="_root_" type="string" indexed="true" stored="false"/>
+
+   <!-- Only remove the "id" field if you have a very good reason to. While 
not strictly
+     required, it is highly recommended. A <uniqueKey> is present in almost 
all Solr 
+     installations. See the <uniqueKey> declaration below where <uniqueKey> is 
set to "id".
+   -->   
+   <field name="id" type="string" indexed="true" stored="true" required="true" 
multiValued="false" /> 
+
+   <!-- Dynamic field definitions allow using convention over configuration
+       for fields via the specification of patterns to match field names. 
+       EXAMPLE:  name="*_i" will match any field ending in _i (like myid_i, 
z_i)
+       RESTRICTION: the glob-like pattern in the name attribute must have
+       a "*" only at the start or the end.  -->
+
+   <dynamicField name="*_i"  type="int"    indexed="true"  stored="true"/>
+   <dynamicField name="*_is" type="int"    indexed="true"  stored="true"  
multiValued="true"/>
+   <dynamicField name="*_s"  type="string"  indexed="true"  stored="true" />
+   <dynamicField name="*_ss" type="string"  indexed="true"  stored="true" 
multiValued="true"/>
+   <dynamicField name="*_l"  type="long"   indexed="true"  stored="true"/>
+   <dynamicField name="*_ls" type="long"   indexed="true"  stored="true"  
multiValued="true"/>
+   <dynamicField name="*_t"  type="text_general"    indexed="true"  
stored="true"/>
+   <dynamicField name="*_txt" type="text_general"   indexed="true"  
stored="true" multiValued="true"/>
+   <dynamicField name="*_en"  type="text_en"    indexed="true"  stored="true" 
multiValued="true"/>
+   <dynamicField name="*_b"  type="boolean" indexed="true" stored="true"/>
+   <dynamicField name="*_bs" type="boolean" indexed="true" stored="true"  
multiValued="true"/>
+   <dynamicField name="*_f"  type="float"  indexed="true"  stored="true"/>
+   <dynamicField name="*_fs" type="float"  indexed="true"  stored="true"  
multiValued="true"/>
+   <dynamicField name="*_d"  type="double" indexed="true"  stored="true"/>
+   <dynamicField name="*_ds" type="double" indexed="true"  stored="true"  
multiValued="true"/>
+
+   <!-- Type used to index the lat and lon components for the "location" 
FieldType -->
+   <dynamicField name="*_coordinate"  type="tdouble" indexed="true"  
stored="false" />
+
+   <dynamicField name="*_dt"  type="date"    indexed="true"  stored="true"/>
+   <dynamicField name="*_dts" type="date"    indexed="true"  stored="true" 
multiValued="true"/>
+   <dynamicField name="*_p"  type="location" indexed="true" stored="true"/>
+
+   <!-- some trie-coded dynamic fields for faster range queries -->
+   <dynamicField name="*_ti" type="tint"    indexed="true"  stored="true"/>
+   <dynamicField name="*_tl" type="tlong"   indexed="true"  stored="true"/>
+   <dynamicField name="*_tf" type="tfloat"  indexed="true"  stored="true"/>
+   <dynamicField name="*_td" type="tdouble" indexed="true"  stored="true"/>
+   <dynamicField name="*_tdt" type="tdate"  indexed="true"  stored="true"/>
+
+   <dynamicField name="*_c"   type="currency" indexed="true"  stored="true"/>
+
+   <dynamicField name="ignored_*" type="ignored" multiValued="true"/>
+   <dynamicField name="attr_*" type="text_general" indexed="true" 
stored="true" multiValued="true"/>
+
+   <dynamicField name="random_*" type="random" />
+
+   <!-- uncomment the following to ignore any fields that don't already match 
an existing 
+        field name or dynamic field, rather than reporting them as an error. 
+        alternately, change the type="ignored" to some other type e.g. "text" 
if you want 
+        unknown fields indexed and/or stored by default --> 
+   <!--dynamicField name="*" type="ignored" multiValued="true" /-->
+
+ <!-- Field to use to determine and enforce document uniqueness. 
+      Unless this field is marked with required="false", it will be a required 
field
+   -->
+ <uniqueKey>id</uniqueKey>
+
+  <!-- copyField commands copy one field to another at the time a document
+        is added to the index.  It's used either to index the same field 
differently,
+        or to add multiple fields to the same field for easier/faster 
searching.  -->
+
+  <!--
+   <copyField source="title" dest="text"/>
+   <copyField source="body" dest="text"/>
+  -->
+  
+    <!-- field type definitions. The "name" attribute is
+       just a label to be used by field definitions.  The "class"
+       attribute and any other attributes determine the real
+       behavior of the fieldType.
+         Class names starting with "solr" refer to java classes in a
+       standard package such as org.apache.solr.analysis
+    -->
+
+    <!-- The StrField type is not analyzed, but indexed/stored verbatim.
+       It supports doc values but in that case the field needs to be
+       single-valued and either required or have a default value.
+      -->
+    <fieldType name="string" class="solr.StrField" sortMissingLast="true" />
+
+    <!-- boolean type: "true" or "false" -->
+    <fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
+
+    <!-- sortMissingLast and sortMissingFirst attributes are optional 
attributes that are
+         currently supported on types that are sorted internally as strings
+         and on numeric types.
+            This includes "string","boolean", and, as of 3.5 (and 4.x),
+            int, float, long, date, double, including the "Trie" variants.
+       - If sortMissingLast="true", then a sort on this field will cause 
documents
+         without the field to come after documents with the field,
+         regardless of the requested sort order (asc or desc).
+       - If sortMissingFirst="true", then a sort on this field will cause 
documents
+         without the field to come before documents with the field,
+         regardless of the requested sort order.
+       - If sortMissingLast="false" and sortMissingFirst="false" (the default),
+         then default lucene sorting will be used which places docs without the
+         field first in an ascending sort and last in a descending sort.
+    -->    
+
+    <!--
+      Default numeric field types. For faster range queries, consider the 
tint/tfloat/tlong/tdouble types.
+
+      These fields support doc values, but they require the field to be
+      single-valued and either be required or have a default value.
+    -->
+    <fieldType name="int" class="solr.TrieIntField" precisionStep="0" 
positionIncrementGap="0"/>
+    <fieldType name="float" class="solr.TrieFloatField" precisionStep="0" 
positionIncrementGap="0"/>
+    <fieldType name="long" class="solr.TrieLongField" precisionStep="0" 
positionIncrementGap="0"/>
+    <fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" 
positionIncrementGap="0"/>
+
+    <!--
+     Numeric field types that index each value at various levels of precision
+     to accelerate range queries when the number of values between the range
+     endpoints is large. See the javadoc for NumericRangeQuery for internal
+     implementation details.
+
+     Smaller precisionStep values (specified in bits) will lead to more tokens
+     indexed per value, slightly larger index size, and faster range queries.
+     A precisionStep of 0 disables indexing at different precision levels.
+    -->
+    <fieldType name="tint" class="solr.TrieIntField" precisionStep="8" 
positionIncrementGap="0"/>
+    <fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" 
positionIncrementGap="0"/>
+    <fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" 
positionIncrementGap="0"/>
+    <fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" 
positionIncrementGap="0"/>
+
+    <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, 
and
+         is a more restricted form of the canonical representation of dateTime
+         http://www.w3.org/TR/xmlschema-2/#dateTime    
+         The trailing "Z" designates UTC time and is mandatory.
+         Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
+         All other components are mandatory.
+
+         Expressions can also be used to denote calculations that should be
+         performed relative to "NOW" to determine the value, ie...
+
+               NOW/HOUR
+                  ... Round to the start of the current hour
+               NOW-1DAY
+                  ... Exactly 1 day prior to now
+               NOW/DAY+6MONTHS+3DAYS
+                  ... 6 months and 3 days in the future from the start of
+                      the current day
+                      
+         Consult the TrieDateField javadocs for more information.
+
+         Note: For faster range queries, consider the tdate type
+      -->
+    <fieldType name="date" class="solr.TrieDateField" precisionStep="0" 
positionIncrementGap="0"/>
+
+    <!-- A Trie based date field for faster date range queries and date 
faceting. -->
+    <fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" 
positionIncrementGap="0"/>
+
+
+    <!--Binary data type. The data should be sent/retrieved in as Base64 
encoded Strings -->
+    <fieldType name="binary" class="solr.BinaryField"/>
+
+    <!-- The "RandomSortField" is not used to store or search any
+         data.  You can declare fields of this type it in your schema
+         to generate pseudo-random orderings of your docs for sorting 
+         or function purposes.  The ordering is generated based on the field
+         name and the version of the index. As long as the index version
+         remains unchanged, and the same field name is reused,
+         the ordering of the docs will be consistent.  
+         If you want different pseudo-random orderings of documents,
+         for the same version of the index, use a dynamicField and
+         change the field name in the request.
+     -->
+    <fieldType name="random" class="solr.RandomSortField" indexed="true" />
+
+    <!-- solr.TextField allows the specification of custom text analyzers
+         specified as a tokenizer and a list of token filters. Different
+         analyzers may be specified for indexing and querying.
+
+         The optional positionIncrementGap puts space between multiple fields 
of
+         this type on the same document, with the purpose of preventing false 
phrase
+         matching across fields.
+
+         For more info on customizing your analyzer chain, please see
+         http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
+     -->
+
+    <!-- One can also specify an existing Analyzer class that has a
+         default constructor via the class attribute on the analyzer element.
+         Example:
+    <fieldType name="text_greek" class="solr.TextField">
+      <analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
+    </fieldType>
+    -->
+
+    <!-- A text field that only splits on whitespace for exact matching of 
words -->
+    <fieldType name="text_ws" class="solr.TextField" 
positionIncrementGap="100">
+      <analyzer>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- A general text field that has reasonable, generic
+         cross-language defaults: it tokenizes with StandardTokenizer,
+        removes stop words from case-insensitive "stopwords.txt"
+        (empty by default), and down cases.  At query time only, it
+        also applies synonyms. -->
+    <fieldType name="text_general" class="solr.TextField" 
positionIncrementGap="100">
+      <analyzer type="index">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" 
words="stopwords.txt" />
+        <!-- in this example, we will only use synonyms at query time
+        <filter class="solr.SynonymFilterFactory" 
synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        -->
+        <filter class="solr.LowerCaseFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" 
words="stopwords.txt" />
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" 
ignoreCase="true" expand="true"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- A text field with defaults appropriate for English: it
+         tokenizes with StandardTokenizer, removes English stop words
+         (lang/stopwords_en.txt), down cases, protects words from 
protwords.txt, and
+         finally applies Porter's stemming.  The query time analyzer
+         also applies synonyms from synonyms.txt. -->
+    <fieldType name="text_en" class="solr.TextField" 
positionIncrementGap="100">
+      <analyzer type="index">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <!-- in this example, we will only use synonyms at query time
+        <filter class="solr.SynonymFilterFactory" 
synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        -->
+        <!-- Case insensitive stop word removal.
+        -->
+        <filter class="solr.StopFilterFactory"
+                ignoreCase="true"
+                words="lang/stopwords_en.txt"
+                />
+        <filter class="solr.LowerCaseFilterFactory"/>
+       <filter class="solr.EnglishPossessiveFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" 
protected="protwords.txt"/>
+       <!-- Optionally you may want to use this less aggressive stemmer 
instead of PorterStemFilterFactory:
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+       -->
+        <filter class="solr.PorterStemFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" 
ignoreCase="true" expand="true"/>
+        <filter class="solr.StopFilterFactory"
+                ignoreCase="true"
+                words="lang/stopwords_en.txt"
+                />
+        <filter class="solr.LowerCaseFilterFactory"/>
+       <filter class="solr.EnglishPossessiveFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" 
protected="protwords.txt"/>
+       <!-- Optionally you may want to use this less aggressive stemmer 
instead of PorterStemFilterFactory:
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+       -->
+        <filter class="solr.PorterStemFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- A text field with defaults appropriate for English, plus
+        aggressive word-splitting and autophrase features enabled.
+        This field is just like text_en, except it adds
+        WordDelimiterFilter to enable splitting and matching of
+        words on case-change, alpha numeric boundaries, and
+        non-alphanumeric chars.  This means certain compound word
+        cases will work, for example query "wi fi" will match
+        document "WiFi" or "wi-fi".
+        -->
+    <fieldType name="text_en_splitting" class="solr.TextField" 
positionIncrementGap="100" autoGeneratePhraseQueries="true">
+      <analyzer type="index">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <!-- in this example, we will only use synonyms at query time
+        <filter class="solr.SynonymFilterFactory" 
synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        -->
+        <!-- Case insensitive stop word removal.
+        -->
+        <filter class="solr.StopFilterFactory"
+                ignoreCase="true"
+                words="lang/stopwords_en.txt"
+                />
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" 
generateNumberParts="1" catenateWords="1" catenateNumbers="1" catenateAll="0" 
splitOnCaseChange="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" 
protected="protwords.txt"/>
+        <filter class="solr.PorterStemFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" 
ignoreCase="true" expand="true"/>
+        <filter class="solr.StopFilterFactory"
+                ignoreCase="true"
+                words="lang/stopwords_en.txt"
+                />
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" 
generateNumberParts="1" catenateWords="0" catenateNumbers="0" catenateAll="0" 
splitOnCaseChange="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" 
protected="protwords.txt"/>
+        <filter class="solr.PorterStemFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- Less flexible matching, but less false matches.  Probably not ideal 
for product names,
+         but may be good for SKUs.  Can insert dashes in the wrong place and 
still match. -->
+    <fieldType name="text_en_splitting_tight" class="solr.TextField" 
positionIncrementGap="100" autoGeneratePhraseQueries="true">
+      <analyzer>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" 
ignoreCase="true" expand="false"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" 
words="lang/stopwords_en.txt"/>
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" 
generateNumberParts="0" catenateWords="1" catenateNumbers="1" catenateAll="0"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.KeywordMarkerFilterFactory" 
protected="protwords.txt"/>
+        <filter class="solr.EnglishMinimalStemFilterFactory"/>
+        <!-- this filter can remove any duplicate tokens that appear at the 
same position - sometimes
+             possible with WordDelimiterFilter in conjunction with stemming. -->
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- Just like text_general except it reverses the characters of
+        each token, to enable more efficient leading wildcard queries. -->
+    <fieldType name="text_general_rev" class="solr.TextField" 
positionIncrementGap="100">
+      <analyzer type="index">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" 
words="stopwords.txt" />
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.ReversedWildcardFilterFactory" withOriginal="true"
+           maxPosAsterisk="3" maxPosQuestion="2" maxFractionAsterisk="0.33"/>
+      </analyzer>
+      <analyzer type="query">
+        <tokenizer class="solr.StandardTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" 
ignoreCase="true" expand="true"/>
+        <filter class="solr.StopFilterFactory" ignoreCase="true" 
words="stopwords.txt" />
+        <filter class="solr.LowerCaseFilterFactory"/>
+      </analyzer>
+    </fieldType>
+
+    <!-- This is an example of using the KeywordTokenizer along
+         with various TokenFilterFactories to produce a sortable field
+         that does not include some properties of the source text
+      -->
+    <fieldType name="alphaOnlySort" class="solr.TextField" 
sortMissingLast="true" omitNorms="true">
+      <analyzer>
+        <!-- KeywordTokenizer does no actual tokenizing, so the entire
+             input string is preserved as a single token
+          -->
+        <tokenizer class="solr.KeywordTokenizerFactory"/>
+        <!-- The LowerCase TokenFilter does what you expect, which can be useful
+             when you want your sorting to be case insensitive
+          -->
+        <filter class="solr.LowerCaseFilterFactory" />
+        <!-- The TrimFilter removes any leading or trailing whitespace -->
+        <filter class="solr.TrimFilterFactory" />
+        <!-- The PatternReplaceFilter gives you the flexibility to use
+             Java Regular expression to replace any sequence of characters
+             matching a pattern with an arbitrary replacement string, 
+             which may include back references to portions of the original
+             string matched by the pattern.
+             
+             See the Java Regular Expression documentation for more
+             information on pattern and replacement string syntax.
+             
+             
http://docs.oracle.com/javase/7/docs/api/java/util/regex/package-summary.html
+          -->
+        <filter class="solr.PatternReplaceFilterFactory"
+                pattern="([^a-z])" replacement="" replace="all"
+        />
+      </analyzer>
+    </fieldType>
+
+    <!-- lowercases the entire field value, keeping it as a single token.  -->
+    <fieldType name="lowercase" class="solr.TextField" 
positionIncrementGap="100">
+      <analyzer>
+        <tokenizer class="solr.KeywordTokenizerFactory"/>
+        <filter class="solr.LowerCaseFilterFactory" />
+      </analyzer>
+    </fieldType>
+
+    <!-- since fields of this type are by default not stored or indexed,
+         any data added to them will be ignored outright.  --> 
+    <fieldType name="ignored" stored="false" indexed="false" 
multiValued="true" class="solr.StrField" />
+
+    <!-- This point type indexes the coordinates as separate fields (subFields)
+      If subFieldType is defined, it references a type, and a dynamic field
+      definition is created matching *___<typename>.  Alternately, if 
+      subFieldSuffix is defined, that is used to create the subFields.
+      Example: if subFieldType="double", then the coordinates would be
+        indexed in fields myloc_0___double,myloc_1___double.
+      Example: if subFieldSuffix="_d" then the coordinates would be indexed
+        in fields myloc_0_d,myloc_1_d
+      The subFields are an implementation detail of the fieldType, and end
+      users normally should not need to know about them.
+     -->
+    <fieldType name="point" class="solr.PointType" dimension="2" 
subFieldSuffix="_d"/>
+
+    <!-- A specialized field for geospatial search. If indexed, this fieldType 
must not be multivalued. -->
+    <fieldType name="location" class="solr.LatLonType" 
subFieldSuffix="_coordinate"/>
+
+    <!-- Spatial rectangle (bounding box) field. It supports most spatial 
predicates, and has
+     special relevancy modes: score=overlapRatio|area|area2D (local-param to 
the query).  DocValues is recommended for
+     relevancy. -->
+    <fieldType name="bbox" class="solr.BBoxField"
+               geo="true" distanceUnits="kilometers" numberType="_bbox_coord" 
/>
+    <fieldType name="_bbox_coord" class="solr.TrieDoubleField" 
precisionStep="8" docValues="true" stored="false"/>
+
+   <!-- Money/currency field type. See 
http://wiki.apache.org/solr/MoneyFieldType
+        Parameters:
+          defaultCurrency: Specifies the default currency if none specified. 
Defaults to "USD"
+          precisionStep:   Specifies the precisionStep for the TrieLong field 
used for the amount
+          providerClass:   Lets you plug in other exchange provider backend:
+                           solr.FileExchangeRateProvider is the default and 
takes one parameter:
+                             currencyConfig: name of an xml file holding 
exchange rates
+                           solr.OpenExchangeRatesOrgProvider uses rates from 
openexchangerates.org:
+                             ratesFileLocation: URL or path to rates JSON file 
(default latest.json on the web)
+                             refreshInterval: Number of minutes between each 
rates fetch (default: 1440, min: 60)
+   -->
+    <fieldType name="currency" class="solr.CurrencyField" precisionStep="8" 
defaultCurrency="USD" currencyConfig="currency.xml" />
+
+    <!--Titan specific-->
+    <fieldType name="uuid"
+               class="solr.UUIDField"
+               indexed="true" />
+
+
+    <dynamicField name="*_uuid" type="uuid"     indexed="true"  stored="true"/>
+
+    <!-- TTL -->
+    <field name="ttl"  type="string" indexed="true" stored="true" />
+    <field name="expire_at" type="date" indexed="true" stored="true" />
+    <field name="timestamp" type="date" indexed="true" stored="true" />
+</schema>
diff --git a/dev-support/atlas-docker/config/solr/solrconfig.xml 
b/dev-support/atlas-docker/config/solr/solrconfig.xml
new file mode 100644
index 0000000..21d19ef
--- /dev/null
+++ b/dev-support/atlas-docker/config/solr/solrconfig.xml
@@ -0,0 +1,630 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- 
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml. 
+-->
+<config>
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+  -->
+  <luceneMatchVersion>5.0.0</luceneMatchVersion>
+
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <dataDir>${solr.data.dir:}</dataDir>
+
+
+  <!-- The DirectoryFactory to use for indexes.
+       
+       solr.StandardDirectoryFactory is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  solr.NRTCachingDirectoryFactory, the default,
+       wraps solr.StandardDirectoryFactory and caches small files in memory
+       for better NRT performance.
+
+       One can force a particular implementation via solr.MMapDirectoryFactory,
+       solr.NIOFSDirectoryFactory, or solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based, not
+       persistent, and doesn't work with replication.
+    -->
+  <directoryFactory name="DirectoryFactory" 
+                    
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}">
+
+      <!-- These will be used if you are using the solr.HdfsDirectoryFactory,
+         otherwise they will be ignored. If you don't plan on using hdfs,
+         you can safely remove this section. -->
+      <!-- The root directory that collection data should be written to. -->
+      <str name="solr.hdfs.home">${solr.hdfs.home:}</str>
+      <!-- The hadoop configuration files to use for the hdfs client. -->
+      <str name="solr.hdfs.confdir">${solr.hdfs.confdir:}</str>
+      <!-- Enable/Disable the hdfs cache. -->
+      <str 
name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</str>
+      <!-- Enable/Disable using one global cache for all SolrCores.
+           The settings used will be from the first HdfsDirectoryFactory 
created. -->
+      <str 
name="solr.hdfs.blockcache.global">${solr.hdfs.blockcache.global:true}</str>
+
+  </directoryFactory> 
+
+  <!-- The CodecFactory for defining the format of the inverted index.
+       The default implementation is SchemaCodecFactory, which is the official 
Lucene
+       index format, but hooks into the schema to provide per-field 
customization of
+       the postings lists and per-document values in the fieldType element
+       (postingsFormat/docValuesFormat). Note that most of the alternative 
implementations
+       are experimental, so if you choose to customize the index format, it's 
a good
+       idea to convert back to the official format e.g. via 
IndexWriter.addIndexes(IndexReader)
+       before upgrading to a newer version to avoid unnecessary reindexing.
+  -->
+  <codecFactory class="solr.SchemaCodecFactory"/>
+
+
+  <!-- To enable dynamic schema REST APIs, use the following for 
<schemaFactory>:
+
+       <schemaFactory class="ManagedIndexSchemaFactory">
+         <bool name="mutable">true</bool>
+         <str name="managedSchemaResourceName">managed-schema</str>
+       </schemaFactory>
+
+       When ManagedIndexSchemaFactory is specified, Solr will load the schema 
from
+       the resource named in 'managedSchemaResourceName', rather than from 
schema.xml.
+       Note that the managed schema resource CANNOT be named schema.xml.  If 
the managed
+       schema does not exist, Solr will create it after reading schema.xml, 
then rename
+       'schema.xml' to 'schema.xml.bak'.
+
+       Do NOT hand edit the managed schema - external modifications will be 
ignored and
+       overwritten as a result of schema modification REST API calls.
+
+       When ManagedIndexSchemaFactory is specified with mutable = true, schema
+       modification REST API calls will be allowed; otherwise, error responses 
will be
+       sent back for these requests.
+  <schemaFactory class="ClassicIndexSchemaFactory"/>
+  -->
+
+  <schemaFactory class="ManagedIndexSchemaFactory">
+         <bool name="mutable">true</bool>
+         <str name="managedSchemaResourceName">managed-schema</str>
+  </schemaFactory>
+
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Index Config - These settings control low-level behavior of indexing
+       Most example settings here show the default value, but are commented
+       out, to more easily see where customizations have been made.
+       
+       Note: This replaces <indexDefaults> and <mainIndex> from older versions
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
-->
+  <indexConfig>
+
+    <!-- LockFactory 
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+      
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         Defaults: 'native' is default for Solr3.6 and later, otherwise
+                   'simple' is the default
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>${solr.lock.type:native}</lockType>
+
+    <!-- Lucene Infostream
+       
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its info stream to solr's log. By default,
+         this is enabled here, and controlled through log4j.properties.
+      -->
+     <infoStream>true</infoStream>
+  </indexConfig>
+
+
+  <!-- JMX
+       
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <jmx />
+  <!-- If you want to connect to a particular server, specify the
+       agentId 
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- Enables a transaction log, used for real-time get, durability, and
+         solr cloud replica recovery.  The log can grow as big as
+         uncommitted changes to the index, so use of a hard autoCommit
+         is recommended (see below).
+         "dir" - the target directory for transaction logs, defaults to the
+                solr data directory.  --> 
+    <updateLog>
+      <str name="dir">${solr.ulog.dir:}</str>
+    </updateLog>
+ 
+    <!-- AutoCommit
+
+         Perform a hard commit automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents. 
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time in ms that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit. 
+         openSearcher - if false, the commit causes recent index changes
+           to be flushed to stable storage, but does not cause a new
+           searcher to be opened to make those changes visible.
+
+         If the updateLog is enabled, then it's highly recommended to
+         have some sort of hard autoCommit to limit the log size.
+      -->
+     <autoCommit> 
+       <maxTime>${solr.autoCommit.maxTime:15000}</maxTime> 
+       <openSearcher>false</openSearcher> 
+     </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+      -->
+     <autoSoftCommit> 
+       <maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime> 
+     </autoSoftCommit>
+
+  </updateHandler>
+  
+  <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+       Query section - these settings control query time things like caches
+       ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
-->
+  <query>
+    <!-- Max Boolean Clauses
+
+         Maximum number of clauses in each BooleanQuery,  an exception
+         is thrown if exceeded.
+
+         ** WARNING **
+         
+         This option actually modifies a global Lucene property that
+         will affect all SolrCores.  If multiple solrconfig.xml files
+         disagree on this property, the value at any given moment will
+         be based on the last SolrCore to be initialized.
+         
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.  
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation to use
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.  
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="2000"
+                 initialSize="2000"
+                 autowarmCount="1000"/>
+
+    <!-- Query Result Cache
+         
+         Caches results of searches - ordered lists of document ids
+         (DocList) based on a query, a sort, and the range of documents 
requested.  
+      -->
+    <queryResultCache class="solr.FastLRUCache"
+                      size="26000"
+                      initialSize="26000"
+                      autowarmCount="400"/>
+   
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.  
+      -->
+    <documentCache class="solr.FastLRUCache"
+                   size="26000"
+                   initialSize="26000"
+                   autowarmCount="400"/>
+    
+    <!-- custom cache currently used by block join --> 
+    <cache name="perSegFilter"
+      class="solr.search.LRUCache"
+      size="10"
+      initialSize="0"
+      autowarmCount="10"
+      regenerator="solr.NoOpRegenerator" />
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        are collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.  
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache. 
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+    <!-- Max Warming Searchers
+         
+         Maximum number of searchers that may be warming in the
+         background concurrently.  An error is returned if this limit
+         is exceeded.
+
+         Recommend values of 1-2 for read-only slaves, higher for
+         masters w/o cache warming.
+      -->
+    <maxWarmingSearchers>2</maxWarmingSearchers>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+       handleSelect is a legacy option that affects the behavior of requests
+       such as /select?qt=XXX
+
+       handleSelect="true" will cause the SolrDispatchFilter to process
+       the request and dispatch the query to a handler specified by the 
+       "qt" param, assuming "/select" isn't already registered.
+
+       handleSelect="false" will cause the SolrDispatchFilter to
+       ignore "/select" requests, resulting in a 404 unless a handler
+       is explicitly registered with the name "/select"
+
+       handleSelect="true" is not recommended for new users, but is the default
+       for backwards compatibility
+    -->
+  <requestDispatcher handleSelect="false" >
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size (in KiB) of
+         Multipart File Uploads that Solr will allow in a Request.
+         
+         formdataUploadLimitInKB - specifies the max size (in KiB) of
+         form data (application/x-www-form-urlencoded) sent via
+         POST. You can use POST to pass request parameters not
+         fitting into the URL.
+         
+         addHttpRequestToContext - if set to true, it will instruct
+         the requestParsers to include the original HttpServletRequest
+         object in the context map of the SolrQueryRequest under the 
+         key "httpRequest". It will not be used by any of the existing
+         Solr components, but may be useful when developing custom 
+         plugins.
+         
+         *** WARNING ***
+         The settings below authorize Solr to fetch remote files, You
+         should make sure your system has some authentication before
+         using enableRemoteStreaming="true"
+
+      --> 
+    <requestParsers enableRemoteStreaming="true" 
+                    multipartUploadLimitInKB="2048000"
+                    formdataUploadLimitInKB="2048"
+                    addHttpRequestToContext="false"/>
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+
+  </requestDispatcher>
+
+  <!-- Request Handlers 
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       Incoming queries will be dispatched to a specific handler by name
+       based on the path specified in the request.
+
+       Legacy behavior: If the request path uses "/select" but no Request
+       Handler has that name, and if handleSelect="true" has been specified in
+       the requestDispatcher, then the Request Handler is dispatched based on
+       the qt parameter.  Handlers without a leading '/' are accessed this way
+       like so: http://host/app/[core/]select?qt=name  If no qt is
+       given, then the requestHandler that declares default="true" will be
+       used or the one named "standard".
+
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler". It delegates to a sequence
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <requestHandler name="/select" class="solr.SearchHandler">
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+     </lst>
+
+    </requestHandler>
+
+  <!-- A request handler that returns indented JSON by default -->
+  <requestHandler name="/query" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <str name="wt">json</str>
+       <str name="indent">true</str>
+       <str name="df">text</str>
+     </lst>
+  </requestHandler>
+
+
+  <!--
+    The export request handler is used to export full sorted result sets.
+    Do not change these defaults.
+  -->
+  <requestHandler name="/export" class="solr.SearchHandler">
+    <lst name="invariants">
+      <str name="rq">{!xport}</str>
+      <str name="wt">xsort</str>
+      <str name="distrib">false</str>
+    </lst>
+
+    <arr name="components">
+      <str>query</str>
+    </arr>
+  </requestHandler>
+
+
+  <initParams path="/update/**,/query,/select,/tvrh,/elevate,/spell">
+    <lst name="defaults">
+      <str name="df">text</str>
+    </lst>
+  </initParams>
+
+  <!-- Field Analysis Request Handler
+
+       RequestHandler that provides much the same functionality as
+       analysis.jsp. Provides the ability to specify multiple field
+       types and field names in the same request and outputs
+       index-time and query-time analysis for each of them.
+
+       Request parameters are:
+       analysis.fieldname - field name whose analyzers are to be used
+
+       analysis.fieldtype - field type whose analyzers are to be used
+       analysis.fieldvalue - text for index-time analysis
+       q (or analysis.q) - text for query time analysis
+       analysis.showmatch (true|false) - When set to true and when
+           query analysis is performed, the produced tokens of the
+           field value analysis will be marked as "matched" for every
+           token that is produced by the query analysis
+   -->
+  <requestHandler name="/analysis/field" 
+                  startup="lazy"
+                  class="solr.FieldAnalysisRequestHandler" />
+
+
+  <!-- Document Analysis Handler
+
+       http://wiki.apache.org/solr/AnalysisRequestHandler
+
+       An analysis handler that provides a breakdown of the analysis
+       process of provided documents. This handler expects a (single)
+       content stream with the following format:
+
+       <docs>
+         <doc>
+           <field name="id">1</field>
+           <field name="name">The Name</field>
+           <field name="text">The Text Value</field>
+         </doc>
+         <doc>...</doc>
+         <doc>...</doc>
+         ...
+       </docs>
+
+    Note: Each document must contain a field which serves as the
+    unique key. This key is used in the returned response to associate
+    an analysis breakdown to the analyzed document.
+
+    Like the FieldAnalysisRequestHandler, this handler also supports
+    query analysis by sending either an "analysis.query" or "q"
+    request parameter that holds the query text to be analyzed. It
+    also supports the "analysis.showmatch" parameter which when set to
+    true, all field tokens that match the query tokens will be marked
+    as a "match". 
+  -->
+  <requestHandler name="/analysis/document" 
+                  class="solr.DocumentAnalysisRequestHandler" 
+                  startup="lazy" />
+
+  <!-- Echo the request contents back to the client -->
+  <requestHandler name="/debug/dump" class="solr.DumpRequestHandler" >
+    <lst name="defaults">
+     <str name="echoParams">explicit</str> 
+     <str name="echoHandler">true</str>
+    </lst>
+  </requestHandler>
+  
+
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by 
+       instances of SearchHandler (which can access them by name)
+       
+       By default, the following components are available:
+       
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+       
+     -->
+
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+     <lst name="defaults">
+      <bool name="terms">true</bool>
+      <bool name="distrib">false</bool>
+    </lst>     
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Legacy config for the admin interface -->
+  <admin>
+    <defaultQuery>*:*</defaultQuery>
+  </admin>
+
+
+    <!--Titan specific-->
+    <updateRequestProcessorChain default="true">
+        <processor class="solr.TimestampUpdateProcessorFactory">
+            <str name="fieldName">timestamp</str>
+        </processor>
+        <processor class="solr.processor.DocExpirationUpdateProcessorFactory">
+            <int name="autoDeletePeriodSeconds">5</int>
+            <str name="ttlFieldName">ttl</str>
+            <str name="expirationFieldName">expire_at</str>
+        </processor>
+        <processor class="solr.FirstFieldValueUpdateProcessorFactory">
+            <str name="fieldName">expire_at_dt</str>
+        </processor>
+        <processor class="solr.LogUpdateProcessorFactory"/>
+        <processor class="solr.RunUpdateProcessorFactory"/>
+    </updateRequestProcessorChain>
+</config>
diff --git a/dev-support/atlas-docker/config/solr/stopwords.txt 
b/dev-support/atlas-docker/config/solr/stopwords.txt
new file mode 100644
index 0000000..ae1e83e
--- /dev/null
+++ b/dev-support/atlas-docker/config/solr/stopwords.txt
@@ -0,0 +1,14 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/dev-support/atlas-docker/config/solr/synonyms.txt 
b/dev-support/atlas-docker/config/solr/synonyms.txt
new file mode 100644
index 0000000..7f72128
--- /dev/null
+++ b/dev-support/atlas-docker/config/solr/synonyms.txt
@@ -0,0 +1,29 @@
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#-----------------------------------------------------------------------
+#some test synonym mappings unlikely to appear in real input text
+aaafoo => aaabar
+bbbfoo => bbbfoo bbbbar
+cccfoo => cccbar cccbaz
+fooaaa,baraaa,bazaaa
+
+# Some synonym groups specific to this example
+GB,gib,gigabyte,gigabytes
+MB,mib,megabyte,megabytes
+Television, Televisions, TV, TVs
+#notice we use "gib" instead of "GiB" so any WordDelimiterFilter coming
+#after us won't split it into two words.
+
+# Synonym mappings can be used for spelling correction too
+pixima => pixma
+
diff --git a/dev-support/atlas-docker/docker-compose.atlas-hadoop.yml 
b/dev-support/atlas-docker/docker-compose.atlas-hadoop.yml
new file mode 100644
index 0000000..b172e54
--- /dev/null
+++ b/dev-support/atlas-docker/docker-compose.atlas-hadoop.yml
@@ -0,0 +1,31 @@
+version: '3'
+services:
+  atlas-hadoop:
+    build:
+      context: .
+      dockerfile: Dockerfile.atlas-hadoop
+      args:
+        - HADOOP_VERSION=${HADOOP_VERSION}
+    image: atlas-hadoop
+    container_name: atlas-hadoop
+    hostname: atlas-hadoop.example.com
+    stdin_open: true
+    tty: true
+    networks:
+      - atlas
+    ports:
+      - "9000:9000"
+      - "8088:8088"
+    depends_on:
+      - atlas-base
+    healthcheck:
+      test: "hdfs dfs -ls /"
+      interval: 1m30s
+      timeout: 10s
+      retries: 30
+      start_period: 40s
+    environment:
+      - HADOOP_VERSION
+
+networks:
+  atlas:
diff --git a/dev-support/atlas-docker/docker-compose.atlas-hbase.yml 
b/dev-support/atlas-docker/docker-compose.atlas-hbase.yml
new file mode 100644
index 0000000..7157889
--- /dev/null
+++ b/dev-support/atlas-docker/docker-compose.atlas-hbase.yml
@@ -0,0 +1,30 @@
+version: '3'
+services:
+  atlas-hbase:
+    build:
+      context: .
+      dockerfile: Dockerfile.atlas-hbase
+      args:
+        - HBASE_VERSION=${HBASE_VERSION}
+    image: atlas-hbase
+    container_name: atlas-hbase
+    hostname: atlas-hbase.example.com
+    stdin_open: true
+    tty: true
+    networks:
+      - atlas
+    ports:
+      - "16000:16000"
+      - "16010:16010"
+      - "16020:16020"
+      - "16030:16030"
+    depends_on:
+      atlas-hadoop:
+        condition: service_healthy
+      atlas-zk:
+        condition: service_started
+    environment:
+      - HBASE_VERSION
+
+networks:
+  atlas:
diff --git a/dev-support/atlas-docker/docker-compose.atlas-kafka.yml 
b/dev-support/atlas-docker/docker-compose.atlas-kafka.yml
new file mode 100644
index 0000000..1f70521
--- /dev/null
+++ b/dev-support/atlas-docker/docker-compose.atlas-kafka.yml
@@ -0,0 +1,27 @@
+version: '3'
+services:
+  atlas-kafka:
+    build:
+      context: .
+      dockerfile: Dockerfile.atlas-kafka
+      args:
+        - KAFKA_VERSION=${KAFKA_VERSION}
+        - ATLAS_VERSION=${ATLAS_VERSION}
+    image: atlas-kafka
+    container_name: atlas-kafka
+    hostname: atlas-kafka.example.com
+    stdin_open: true
+    tty: true
+    networks:
+      - atlas
+    ports:
+      - "9092:9092"
+    depends_on:
+      atlas-zk:
+        condition: service_started
+    environment:
+      - KAFKA_VERSION
+      - ATLAS_VERSION
+
+networks:
+  atlas:
diff --git a/dev-support/atlas-docker/docker-compose.atlas.yml 
b/dev-support/atlas-docker/docker-compose.atlas.yml
index cfaf0d7..561c475 100644
--- a/dev-support/atlas-docker/docker-compose.atlas.yml
+++ b/dev-support/atlas-docker/docker-compose.atlas.yml
@@ -18,11 +18,42 @@ services:
     ports:
       - "21000:21000"
     depends_on:
-      - atlas-base
+      atlas-hbase:
+        condition: service_started
+      atlas-kafka:
+        condition: service_started
+      atlas-solr:
+        condition: service_started
+      atlas-zk:
+        condition: service_started
     environment:
       - ATLAS_VERSION
     command:
       - /home/atlas/scripts/atlas.sh
 
+  atlas-zk:
+    build:
+      context: .
+      dockerfile: Dockerfile.atlas-zk
+    image: atlas-zk
+    container_name: atlas-zk
+    hostname: atlas-zk.example.com
+    networks:
+      - atlas
+    ports:
+      - "2181:2181"
+
+  atlas-solr:
+    build:
+      context: .
+      dockerfile: Dockerfile.atlas-solr
+    image: atlas-solr
+    container_name: atlas-solr
+    hostname: atlas-solr.example.com
+    networks:
+      - atlas
+    ports:
+      - "8983:8983"
+
 networks:
   atlas:
diff --git a/dev-support/atlas-docker/download-archives.sh 
b/dev-support/atlas-docker/download-archives.sh
new file mode 100755
index 0000000..dc6d22c
--- /dev/null
+++ b/dev-support/atlas-docker/download-archives.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Downloads HDFS/Hive/HBase/Kafka/.. archives to a local cache directory.
+# The downloaded archives will be used while building docker images that
+# run these services
+#
+
+
+#
+# source .env file to get versions to download
+#
+source .env
+
+
+downloadIfNotPresent() {
+  local fileName=$1
+  local urlBase=$2
+
+  if [ ! -f "downloads/${fileName}" ]
+  then
+    echo "downloading ${urlBase}/${fileName}.."
+
+    curl -L ${urlBase}/${fileName} --output downloads/${fileName}
+  else
+    echo "file already in cache: ${fileName}"
+  fi
+}
+
+downloadIfNotPresent hadoop-${HADOOP_VERSION}.tar.gz        https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}
+downloadIfNotPresent hbase-${HBASE_VERSION}-bin.tar.gz      https://archive.apache.org/dist/hbase/${HBASE_VERSION}
+downloadIfNotPresent kafka_2.12-${KAFKA_VERSION}.tgz        https://archive.apache.org/dist/kafka/${KAFKA_VERSION}
+
diff --git a/dev-support/atlas-docker/downloads/.gitignore 
b/dev-support/atlas-docker/downloads/.gitignore
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/dev-support/atlas-docker/downloads/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/dev-support/atlas-docker/scripts/atlas.sh 
b/dev-support/atlas-docker/scripts/atlas-hadoop-mkdir.sh
similarity index 57%
copy from dev-support/atlas-docker/scripts/atlas.sh
copy to dev-support/atlas-docker/scripts/atlas-hadoop-mkdir.sh
index 7abb6f7..2334ded 100755
--- a/dev-support/atlas-docker/scripts/atlas.sh
+++ b/dev-support/atlas-docker/scripts/atlas-hadoop-mkdir.sh
@@ -16,28 +16,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -x
-
-if [ ! -e ${ATLAS_HOME}/.setupDone ]
-then
-  SETUP_ATLAS=true
-else
-  SETUP_ATLAS=false
-fi
-
-if [ "${SETUP_ATLAS}" == "true" ]
-then
-  encryptedPwd=$(${ATLAS_HOME}/bin/cputil.py -g -u admin -p atlasR0cks! -s)
-
-  echo "admin=ADMIN::${encryptedPwd}" > ${ATLAS_HOME}/conf/users-credentials.properties
-
-  chown -R atlas:atlas ${ATLAS_HOME}/
-
-  touch ${ATLAS_HOME}/.setupDone
-fi
-
-su -c "cd ${ATLAS_HOME}/bin && ./atlas_start.py" atlas
-ATLAS_PID=`ps -ef  | grep -v grep | grep -i "org.apache.atlas.Atlas" | awk '{print $2}'`
-
-# prevent the container from exiting
-tail --pid=$ATLAS_PID -f /dev/null
+# setup directories for HBase
+${HADOOP_HOME}/bin/hdfs dfs -mkdir /hbase
+${HADOOP_HOME}/bin/hdfs dfs -chown hbase:hadoop /hbase
+
+# setup directories for Hive
+${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /user/hive/warehouse
+${HADOOP_HOME}/bin/hdfs dfs -mkdir -p /tmp/hive
+${HADOOP_HOME}/bin/hdfs dfs -chown -R hive:hadoop /tmp/hive /user/hive
+${HADOOP_HOME}/bin/hdfs dfs -chmod 777 /tmp/hive
diff --git a/dev-support/atlas-docker/scripts/atlas-hadoop-setup.sh 
b/dev-support/atlas-docker/scripts/atlas-hadoop-setup.sh
new file mode 100755
index 0000000..d299feb
--- /dev/null
+++ b/dev-support/atlas-docker/scripts/atlas-hadoop-setup.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "export JAVA_HOME=${JAVA_HOME}" >> ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh
+
+cat <<EOF > /etc/ssh/ssh_config
+Host *
+   StrictHostKeyChecking no
+   UserKnownHostsFile=/dev/null
+EOF
+
+cat <<EOF > ${HADOOP_HOME}/etc/hadoop/core-site.xml
+<configuration>
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://atlas-hadoop:9000</value>
+  </property>
+</configuration>
+EOF
+
+cat <<EOF > ${HADOOP_HOME}/etc/hadoop/hdfs-site.xml
+<configuration>
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+  </property>
+</configuration>
+EOF
+
+cat <<EOF > ${HADOOP_HOME}/etc/hadoop/yarn-site.xml
+<configuration>
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.env-whitelist</name>
+    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
+  </property>
+</configuration>
+EOF
+
+mkdir -p /opt/hadoop/logs
+chown -R hdfs:hadoop /opt/hadoop/
+chmod g+w /opt/hadoop/logs
diff --git a/dev-support/atlas-docker/scripts/atlas-hadoop.sh 
b/dev-support/atlas-docker/scripts/atlas-hadoop.sh
new file mode 100755
index 0000000..b33fc16
--- /dev/null
+++ b/dev-support/atlas-docker/scripts/atlas-hadoop.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+service ssh start
+
+CREATE_HDFS_DIR=false
+
+if [ ! -e ${HADOOP_HOME}/.setupDone ]
+then
+  su -c "ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa" hdfs
+  su -c "cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys" hdfs
+  su -c "chmod 0600 ~/.ssh/authorized_keys" hdfs
+
+  su -c "ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa" yarn
+  su -c "cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys" yarn
+  su -c "chmod 0600 ~/.ssh/authorized_keys" yarn
+
+  echo "ssh" > /etc/pdsh/rcmd_default
+
+  ${ATLAS_SCRIPTS}/atlas-hadoop-setup.sh
+
+  su -c "${HADOOP_HOME}/bin/hdfs namenode -format" hdfs
+
+  CREATE_HDFS_DIR=true
+  touch ${HADOOP_HOME}/.setupDone
+fi
+
+su -c "${HADOOP_HOME}/sbin/start-dfs.sh" hdfs
+su -c "${HADOOP_HOME}/sbin/start-yarn.sh" yarn
+
+if [ "${CREATE_HDFS_DIR}" == "true" ]
+then
+  su -c "${ATLAS_SCRIPTS}/atlas-hadoop-mkdir.sh" hdfs
+fi
+
+NAMENODE_PID=`ps -ef  | grep -v grep | grep -i "org.apache.hadoop.hdfs.server.namenode.NameNode" | awk '{print $2}'`
+
+# prevent the container from exiting
+tail --pid=$NAMENODE_PID -f /dev/null
diff --git a/dev-support/atlas-docker/scripts/atlas.sh 
b/dev-support/atlas-docker/scripts/atlas-hbase-setup.sh
similarity index 57%
copy from dev-support/atlas-docker/scripts/atlas.sh
copy to dev-support/atlas-docker/scripts/atlas-hbase-setup.sh
index 7abb6f7..c576989 100755
--- a/dev-support/atlas-docker/scripts/atlas.sh
+++ b/dev-support/atlas-docker/scripts/atlas-hbase-setup.sh
@@ -16,28 +16,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -x
+echo "export JAVA_HOME=${JAVA_HOME}" >> ${HBASE_HOME}/conf/hbase-env.sh
 
-if [ ! -e ${ATLAS_HOME}/.setupDone ]
-then
-  SETUP_ATLAS=true
-else
-  SETUP_ATLAS=false
-fi
+cat <<EOF > /etc/ssh/ssh_config
+Host *
+   StrictHostKeyChecking no
+   UserKnownHostsFile=/dev/null
+EOF
 
-if [ "${SETUP_ATLAS}" == "true" ]
-then
-  encryptedPwd=$(${ATLAS_HOME}/bin/cputil.py -g -u admin -p atlasR0cks! -s)
-
-  echo "admin=ADMIN::${encryptedPwd}" > ${ATLAS_HOME}/conf/users-credentials.properties
-
-  chown -R atlas:atlas ${ATLAS_HOME}/
-
-  touch ${ATLAS_HOME}/.setupDone
-fi
-
-su -c "cd ${ATLAS_HOME}/bin && ./atlas_start.py" atlas
-ATLAS_PID=`ps -ef  | grep -v grep | grep -i "org.apache.atlas.Atlas" | awk '{print $2}'`
-
-# prevent the container from exiting
-tail --pid=$ATLAS_PID -f /dev/null
+cp ${ATLAS_SCRIPTS}/hbase-site.xml /opt/hbase/conf/hbase-site.xml
+chown -R hbase:hadoop /opt/hbase/
diff --git a/dev-support/atlas-docker/scripts/atlas.sh 
b/dev-support/atlas-docker/scripts/atlas-hbase.sh
similarity index 60%
copy from dev-support/atlas-docker/scripts/atlas.sh
copy to dev-support/atlas-docker/scripts/atlas-hbase.sh
index 7abb6f7..81b382b 100755
--- a/dev-support/atlas-docker/scripts/atlas.sh
+++ b/dev-support/atlas-docker/scripts/atlas-hbase.sh
@@ -16,28 +16,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -x
+service ssh start
 
-if [ ! -e ${ATLAS_HOME}/.setupDone ]
+if [ ! -e ${HBASE_HOME}/.setupDone ]
 then
-  SETUP_ATLAS=true
-else
-  SETUP_ATLAS=false
-fi
-
-if [ "${SETUP_ATLAS}" == "true" ]
-then
-  encryptedPwd=$(${ATLAS_HOME}/bin/cputil.py -g -u admin -p atlasR0cks! -s)
+  su -c "ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa" hbase
+  su -c "cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys" hbase
+  su -c "chmod 0600 ~/.ssh/authorized_keys" hbase
 
-  echo "admin=ADMIN::${encryptedPwd}" > ${ATLAS_HOME}/conf/users-credentials.properties
+  echo "ssh" > /etc/pdsh/rcmd_default
 
-  chown -R atlas:atlas ${ATLAS_HOME}/
+  ${ATLAS_SCRIPTS}/atlas-hbase-setup.sh
 
-  touch ${ATLAS_HOME}/.setupDone
+  touch ${HBASE_HOME}/.setupDone
 fi
 
-su -c "cd ${ATLAS_HOME}/bin && ./atlas_start.py" atlas
-ATLAS_PID=`ps -ef  | grep -v grep | grep -i "org.apache.atlas.Atlas" | awk '{print $2}'`
+su -c "${HBASE_HOME}/bin/start-hbase.sh" hbase
+
+HBASE_MASTER_PID=`ps -ef  | grep -v grep | grep -i "org.apache.hadoop.hbase.master.HMaster" | awk '{print $2}'`
 
 # prevent the container from exiting
-tail --pid=$ATLAS_PID -f /dev/null
+tail --pid=$HBASE_MASTER_PID -f /dev/null
diff --git a/dev-support/atlas-docker/scripts/atlas.sh 
b/dev-support/atlas-docker/scripts/atlas-kafka-setup.sh
similarity index 57%
copy from dev-support/atlas-docker/scripts/atlas.sh
copy to dev-support/atlas-docker/scripts/atlas-kafka-setup.sh
index 7abb6f7..2946aeb 100755
--- a/dev-support/atlas-docker/scripts/atlas.sh
+++ b/dev-support/atlas-docker/scripts/atlas-kafka-setup.sh
@@ -16,28 +16,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -x
+cat <<EOF > /etc/ssh/ssh_config
+Host *
+   StrictHostKeyChecking no
+   UserKnownHostsFile=/dev/null
+EOF
 
-if [ ! -e ${ATLAS_HOME}/.setupDone ]
-then
-  SETUP_ATLAS=true
-else
-  SETUP_ATLAS=false
-fi
+sed -i 's/localhost:2181/atlas-zk.example.com:2181/' /opt/kafka/config/server.properties
 
-if [ "${SETUP_ATLAS}" == "true" ]
-then
-  encryptedPwd=$(${ATLAS_HOME}/bin/cputil.py -g -u admin -p atlasR0cks! -s)
-
-  echo "admin=ADMIN::${encryptedPwd}" > ${ATLAS_HOME}/conf/users-credentials.properties
-
-  chown -R atlas:atlas ${ATLAS_HOME}/
-
-  touch ${ATLAS_HOME}/.setupDone
-fi
-
-su -c "cd ${ATLAS_HOME}/bin && ./atlas_start.py" atlas
-ATLAS_PID=`ps -ef  | grep -v grep | grep -i "org.apache.atlas.Atlas" | awk '{print $2}'`
-
-# prevent the container from exiting
-tail --pid=$ATLAS_PID -f /dev/null
+chown -R kafka:hadoop /opt/kafka/
diff --git a/dev-support/atlas-docker/scripts/atlas.sh 
b/dev-support/atlas-docker/scripts/atlas-kafka.sh
similarity index 58%
copy from dev-support/atlas-docker/scripts/atlas.sh
copy to dev-support/atlas-docker/scripts/atlas-kafka.sh
index 7abb6f7..6c0fad0 100755
--- a/dev-support/atlas-docker/scripts/atlas.sh
+++ b/dev-support/atlas-docker/scripts/atlas-kafka.sh
@@ -16,28 +16,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -x
+service ssh start
 
-if [ ! -e ${ATLAS_HOME}/.setupDone ]
+if [ ! -e ${KAFKA_HOME}/.setupDone ]
 then
-  SETUP_ATLAS=true
-else
-  SETUP_ATLAS=false
-fi
-
-if [ "${SETUP_ATLAS}" == "true" ]
-then
-  encryptedPwd=$(${ATLAS_HOME}/bin/cputil.py -g -u admin -p atlasR0cks! -s)
+  su -c "ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa" kafka
+  su -c "cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys" kafka
+  su -c "chmod 0600 ~/.ssh/authorized_keys" kafka
 
-  echo "admin=ADMIN::${encryptedPwd}" > ${ATLAS_HOME}/conf/users-credentials.properties
+  echo "ssh" > /etc/pdsh/rcmd_default
 
-  chown -R atlas:atlas ${ATLAS_HOME}/
+  ${ATLAS_SCRIPTS}/atlas-kafka-setup.sh
 
-  touch ${ATLAS_HOME}/.setupDone
+  touch ${KAFKA_HOME}/.setupDone
 fi
 
-su -c "cd ${ATLAS_HOME}/bin && ./atlas_start.py" atlas
-ATLAS_PID=`ps -ef  | grep -v grep | grep -i "org.apache.atlas.Atlas" | awk '{print $2}'`
-
-# prevent the container from exiting
-tail --pid=$ATLAS_PID -f /dev/null
+su -c "cd ${KAFKA_HOME} && CLASSPATH=${KAFKA_HOME}/config ./bin/kafka-server-start.sh config/server.properties" kafka
diff --git a/dev-support/atlas-docker/scripts/atlas-solr-create.sh 
b/dev-support/atlas-docker/scripts/atlas-solr-create.sh
new file mode 100755
index 0000000..8b1d75d
--- /dev/null
+++ b/dev-support/atlas-docker/scripts/atlas-solr-create.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# Create Solr collections used by Atlas
+precreate-core vertex_index   /opt/solr/server/solr/configsets/atlas/
+precreate-core edge_index     /opt/solr/server/solr/configsets/atlas/
+precreate-core fulltext_index /opt/solr/server/solr/configsets/atlas/
diff --git a/dev-support/atlas-docker/scripts/atlas.sh 
b/dev-support/atlas-docker/scripts/atlas.sh
index 7abb6f7..57d8021 100755
--- a/dev-support/atlas-docker/scripts/atlas.sh
+++ b/dev-support/atlas-docker/scripts/atlas.sh
@@ -31,6 +31,17 @@ then
 
   echo "admin=ADMIN::${encryptedPwd}" > ${ATLAS_HOME}/conf/users-credentials.properties
 
+  sed -i "s/atlas.graph.storage.hostname=.*$/atlas.graph.storage.hostname=atlas-zk.example.com:2181/"              /opt/atlas/conf/atlas-application.properties
+  sed -i "s/atlas.audit.hbase.zookeeper.quorum=.*$/atlas.audit.hbase.zookeeper.quorum=atlas-zk.example.com:2181/"  /opt/atlas/conf/atlas-application.properties
+
+  sed -i "s/^atlas.graph.index.search.solr.mode=cloud/# atlas.graph.index.search.solr.mode=cloud/"                                              /opt/atlas/conf/atlas-application.properties
+  sed -i "s/^# *atlas.graph.index.search.solr.mode=http/atlas.graph.index.search.solr.mode=http/"                                               /opt/atlas/conf/atlas-application.properties
+  sed -i "s/^.*atlas.graph.index.search.solr.http-urls=.*$/atlas.graph.index.search.solr.http-urls=http:\/\/atlas-solr.example.com:8983\/solr/"  /opt/atlas/conf/atlas-application.properties
+
+  sed -i "s/atlas.notification.embedded=.*$/atlas.notification.embedded=false/"                            /opt/atlas/conf/atlas-application.properties
+  sed -i "s/atlas.kafka.zookeeper.connect=.*$/atlas.kafka.zookeeper.connect=atlas-zk.example.com:2181/"    /opt/atlas/conf/atlas-application.properties
+  sed -i "s/atlas.kafka.bootstrap.servers=.*$/atlas.kafka.bootstrap.servers=atlas-kafka.example.com:9092/"  /opt/atlas/conf/atlas-application.properties
+
   chown -R atlas:atlas ${ATLAS_HOME}/
 
   touch ${ATLAS_HOME}/.setupDone
diff --git a/dev-support/atlas-docker/scripts/hbase-site.xml 
b/dev-support/atlas-docker/scripts/hbase-site.xml
new file mode 100644
index 0000000..dd8828f
--- /dev/null
+++ b/dev-support/atlas-docker/scripts/hbase-site.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+--><configuration>
+  <!--
+    The following properties are set for running HBase as a single process on a
+    developer workstation. With this configuration, HBase is running in
+    "stand-alone" mode and without a distributed file system. In this mode, and
+    without further configuration, HBase and ZooKeeper data are stored on the
+    local filesystem, in a path under the value configured for `hbase.tmp.dir`.
+    This value is overridden from its default value of `/tmp` because many
+    systems clean `/tmp` on a regular basis. Instead, it points to a path within
+    this HBase installation directory.
+
+    Running against the `LocalFileSystem`, as opposed to a distributed
+    filesystem, runs the risk of data integrity issues and data loss. Normally
+    HBase will refuse to run in such an environment. Setting
+    `hbase.unsafe.stream.capability.enforce` to `false` overrides this behavior,
+    permitting operation. This configuration is for the developer workstation
+    only and __should not be used in production!__
+
+    See also https://hbase.apache.org/book.html#standalone_dist
+  -->
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://atlas-hadoop.example.com:9000/hbase</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>atlas-zk.example.com</value>
+  </property>
+</configuration>
diff --git a/distro/pom.xml b/distro/pom.xml
index c00de39..d84f5e7 100644
--- a/distro/pom.xml
+++ b/distro/pom.xml
@@ -65,7 +65,6 @@ atlas.graph.index.search.solr.wait-searcher=true
 #for distributed mode, specify zookeeper quorum here
 atlas.graph.storage.hostname=
 atlas.graph.storage.hbase.regions-per-server=1
-atlas.graph.storage.lock.wait-time=10000
 
 #In order to use Cassandra as a backend, comment out the hbase specific properties above, and uncomment the
 #the following properties
@@ -333,7 +332,6 @@ atlas.graph.index.search.solr.wait-searcher=true
 #for distributed mode, specify zookeeper quorum here
 atlas.graph.storage.hostname=localhost
 atlas.graph.storage.hbase.regions-per-server=1
-atlas.graph.storage.lock.wait-time=10000
                 </graph.storage.properties>
                 <hbase.dir>${project.build.directory}/hbase</hbase.dir>
                 <hbase.embedded>true</hbase.embedded>
diff --git a/pom.xml b/pom.xml
index df4dec5..092ba09 100644
--- a/pom.xml
+++ b/pom.xml
@@ -698,7 +698,7 @@
         <guava.version>25.1-jre</guava.version>
         <guice.version>4.1.0</guice.version>
         <hadoop.hdfs-client.version>${hadoop.version}</hadoop.hdfs-client.version>
-        <hadoop.version>3.1.1</hadoop.version>
+        <hadoop.version>3.3.0</hadoop.version>
         <hbase.version>2.3.3</hbase.version>
         <hive.version>3.1.0</hive.version>
         <hppc.version>0.8.1</hppc.version>
diff --git a/webapp/pom.xml b/webapp/pom.xml
index 7d61735..ccbcc31 100755
--- a/webapp/pom.xml
+++ b/webapp/pom.xml
@@ -98,12 +98,6 @@
         <dependency>
             <groupId>org.apache.atlas</groupId>
             <artifactId>atlas-common</artifactId>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.fasterxml.jackson.core</groupId>
-                    <artifactId>*</artifactId>
-                </exclusion>
-            </exclusions>
         </dependency>
 
         <dependency>
@@ -494,6 +488,12 @@
             <groupId>org.keycloak</groupId>
             <artifactId>keycloak-spring-security-adapter</artifactId>
             <version>${keycloak.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.core</groupId>
+                    <artifactId>jackson-core</artifactId>
+                </exclusion>
+            </exclusions>
         </dependency>
 
         <dependency>
@@ -519,6 +519,12 @@
          <artifactId>log4j-api</artifactId>
          <version>${log4j2.version}</version>
        </dependency>
+
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-core</artifactId>
+            <version>${jackson.version}</version>
+        </dependency>
     </dependencies>
 
     <build>

Reply via email to