http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-base/nexus-ingest.sh
----------------------------------------------------------------------
diff --git a/docker/ingest-base/nexus-ingest.sh 
b/docker/ingest-base/nexus-ingest.sh
new file mode 100755
index 0000000..2380c30
--- /dev/null
+++ b/docker/ingest-base/nexus-ingest.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+ 
+# NOTE: This requires GNU getopt.  On Mac OS X and FreeBSD, you have to 
install this
+# separately; see below.
+TEMP=`getopt -o scah --long singleNode,container,admin,help -n 'nexus-ingest' 
-- "$@"`
+
+if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
+
+# Note the quotes around `$TEMP': they are essential!
+eval set -- "$TEMP"
+
+SINGLENODE=false
+CONTAINER=false
+ADMIN=false
+while true; do
+  case "$1" in
+    -s | --singleNode ) SINGLENODE=true; shift ;;
+    -c | --container ) CONTAINER=true; shift ;;
+    -a | --admin ) ADMIN=true; shift ;;
+    -h | --help ) 
+        echo "usage: nexus-ingest [-s|--singleNode] [-c|--container] 
[-a|--admin]" >&2
+        exit 2
+        ;;
+    -- ) shift; break ;;
+    * ) break ;;
+  esac
+done
+
+if [ "$SINGLENODE" = true ]; then
+    source activate nexus-xd-python-modules
+    
+    export JAVA_OPTS="-Dgrape.root=/usr/local/repositories/.groovy/grapes 
-Dgroovy.root=/usr/local/repositories/.groovy/ 
-Dgrape.config=/usr/local/repositories/.groovy/grapeConfig.xml"
+    
+    xd-singlenode --hadoopDistro none
+elif [ "$CONTAINER"  = true ]; then
+    source activate nexus-xd-python-modules
+    export 
SPRING_DATASOURCE_URL="jdbc:mysql://$MYSQL_PORT_3306_TCP_ADDR:$MYSQL_PORT_3306_TCP_PORT/xdjob"
+    export SPRING_DATASOURCE_USERNAME=$MYSQL_USER
+    export SPRING_DATASOURCE_PASSWORD=$MYSQL_PASSWORD
+    export SPRING_DATASOURCE_DRIVERCLASSNAME="com.mysql.jdbc.Driver"
+
+    export ZK_NAMESPACE=$ZOOKEEPER_XD_CHROOT
+    export ZK_CLIENT_CONNECT=$ZOOKEEPER_CONNECT
+    export ZK_CLIENT_SESSIONTIMEOUT=60000
+    export ZK_CLIENT_CONNECTIONTIMEOUT=30000
+    export ZK_CLIENT_INITIALRETRYWAIT=1000
+    export ZK_CLIENT_RETRYMAXATTEMPTS=3
+
+    export SPRING_REDIS_HOST=$REDIS_ADDR
+    export SPRING_REDIS_PORT=$REDIS_PORT
+    
+    export XD_TRANSPORT="kafka"
+    export XD_MESSAGEBUS_KAFKA_BROKERS=$KAFKA_BROKERS
+    export XD_MESSAGEBUS_KAFKA_ZKADDRESS=$KAFKA_ZKADDRESS
+    export XD_MESSAGEBUS_KAFKA_MODE="embeddedHeaders"
+    export XD_MESSAGEBUS_KAFKA_OFFSETMANAGEMENT="kafkaNative"
+    export XD_MESSAGEBUS_KAFKA_HEADERS="absolutefilepath,spec"
+    export XD_MESSAGEBUS_KAFKA_SOCKETBUFFERSIZE=3097152
+    export XD_MESSAGEBUS_KAFKA_DEFAULT_QUEUESIZE=4
+    export XD_MESSAGEBUS_KAFKA_DEFAULT_FETCHSIZE=2048576
+    
+    export JAVA_OPTS="-Dgrape.root=/usr/local/repositories/.groovy/grapes 
-Dgroovy.root=/usr/local/repositories/.groovy/ 
-Dgrape.config=/usr/local/repositories/.groovy/grapeConfig.xml"
+    
+    until nc --send-only -v -w30 $MYSQL_PORT_3306_TCP_ADDR 
$MYSQL_PORT_3306_TCP_PORT </dev/null
+    do
+      echo "Waiting for database connection..."
+      # wait for 5 seconds before check again
+      sleep 5
+    done
+    
+    xd-container --hadoopDistro none
+elif [ "$ADMIN"  = true ]; then
+    source activate nexus-xd-python-modules
+    export 
SPRING_DATASOURCE_URL="jdbc:mysql://$MYSQL_PORT_3306_TCP_ADDR:$MYSQL_PORT_3306_TCP_PORT/xdjob"
+    export SPRING_DATASOURCE_USERNAME=$MYSQL_USER
+    export SPRING_DATASOURCE_PASSWORD=$MYSQL_PASSWORD
+    export SPRING_DATASOURCE_DRIVERCLASSNAME="com.mysql.jdbc.Driver"
+
+    export ZK_NAMESPACE=$ZOOKEEPER_XD_CHROOT
+    export ZK_CLIENT_CONNECT=$ZOOKEEPER_CONNECT
+    export ZK_CLIENT_SESSIONTIMEOUT=60000
+    export ZK_CLIENT_CONNECTIONTIMEOUT=30000
+    export ZK_CLIENT_INITIALRETRYWAIT=1000
+    export ZK_CLIENT_RETRYMAXATTEMPTS=3
+
+    export SPRING_REDIS_HOST=$REDIS_ADDR
+    export SPRING_REDIS_PORT=$REDIS_PORT
+    
+    export XD_TRANSPORT="kafka"
+    export XD_MESSAGEBUS_KAFKA_BROKERS=$KAFKA_BROKERS
+    export XD_MESSAGEBUS_KAFKA_ZKADDRESS=$KAFKA_ZKADDRESS
+    export XD_MESSAGEBUS_KAFKA_MODE="embeddedHeaders"
+    export XD_MESSAGEBUS_KAFKA_OFFSETMANAGEMENT="kafkaNative"
+    export XD_MESSAGEBUS_KAFKA_HEADERS="absolutefilepath,spec"
+    export XD_MESSAGEBUS_KAFKA_SOCKETBUFFERSIZE=3097152
+    export XD_MESSAGEBUS_KAFKA_DEFAULT_QUEUESIZE=4
+    export XD_MESSAGEBUS_KAFKA_DEFAULT_FETCHSIZE=2048576
+    
+    export JAVA_OPTS="-Dgrape.root=/usr/local/repositories/.groovy/grapes 
-Dgroovy.root=/usr/local/repositories/.groovy/ 
-Dgrape.config=/usr/local/repositories/.groovy/grapeConfig.xml"
+    
+    until nc --send-only -v -w30 $MYSQL_PORT_3306_TCP_ADDR 
$MYSQL_PORT_3306_TCP_PORT </dev/null
+    do
+      echo "Waiting for database connection..."
+      # wait for 5 seconds before checking again
+      sleep 5
+    done
+    
+    zookeeper-client -server $ZK_CLIENT_CONNECT -cmd create 
/$ZOOKEEPER_XD_CHROOT ""
+    
+    xd-admin --hadoopDistro none
+else
+    echo "One of -s, -c, or -a is required."
+    echo "usage: nexus-ingest [-s|--singleNode] [-c|--container] [-a|--admin]" 
>&2
+    exit 3
+fi
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-base/stream-definitions
----------------------------------------------------------------------
diff --git a/docker/ingest-base/stream-definitions 
b/docker/ingest-base/stream-definitions
new file mode 100644
index 0000000..0bd79e3
--- /dev/null
+++ b/docker/ingest-base/stream-definitions
@@ -0,0 +1,17 @@
+
+stream create --name ingest-avhrr --definition "scan-for-avhrr-granules: file 
--dir=/usr/local/data/nexus/AVHRR_L4_GLOB_V2/daily_data --mode=ref 
--pattern=201*.nc --maxMessages=1 | header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=1296 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=analysed_sst,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp,STORED_VAR_NAME=analysed_sst
 --bufferSize=1000000 --remoteReplyTimeout=360000 | add-id: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/generate-tile-
 id.groovy | set-dataset-name: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy
 --variables='datasetname=AVHRR_OI_L4_GHRSST_NCEI' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6
 --cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"
+
+stream create --name ingest-avhrr-clim --definition 
"scan-for-avhrr-clim-granules: file 
--dir=/usr/local/data/nexus/AVHRR_L4_GLOB_V2/climatology_5day --mode=ref 
--pattern=*.nc --maxMessages=1 | header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=1296 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=analysed_sst,META=analysed_sst_std,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp,STORED_VAR_NAME=analysed_sst
 --bufferSize=1000000 --remoteReplyTimeout=360000 | add-id: script 
--script=file:///usr/local/spring-xd/cur
 rent/xd-nexus-shared/generate-tile-id.groovy | add-time: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/add-time-from-granulename.groovy
 --variables='regex=^(\\d{3}),dateformat=DDD' | add-day-atr: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/add-day-of-year-attribute.groovy
 --variables='regex=^(\\d{3})' | set-dataset-name: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy
 --variables='datasetname=AVHRR_OI_L4_GHRSST_NCEI_CLIM' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6
 --cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"
+
+module.python-chain.count=5,module.nexus.count=5
+
+
+
+
+stream create --name ingest-modis-aqua-aod-500 --definition "file 
--dir=/usr/local/data/nexus/MODIS_AQUA_AOD/scrubbed_daily_data --mode=ref 
--pattern=*.nc | header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=500 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=MYD08_D3_6_Aerosol_Optical_Depth_Land_Ocean_Mean,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp
 --bufferSize=1000000 --remoteReplyTimeout=1300000 | add-id: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/generate-tile-id.groovy
 | set-d
 ataset-name: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy
 --variables='datasetname=MODIS_AQUA_AOD_500' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6
 --cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"
+
+stream create --name ingest-modis-aqua-aod-16 --definition "file 
--dir=/usr/local/data/nexus/MODIS_AQUA_AOD/scrubbed_daily_data --mode=ref 
--pattern=*.nc | header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=16 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=MYD08_D3_6_Aerosol_Optical_Depth_Land_Ocean_Mean,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp
 --bufferSize=1000000 --remoteReplyTimeout=1300000 | add-id: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/generate-tile-id.groovy
 | set-dat
 aset-name: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy
 --variables='datasetname=MODIS_AQUA_AOD_16' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6
 --cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"
+
+stream create --name ingest-modis-terra-aod-500 --definition "file 
--dir=/usr/local/data/nexus/MODIS_TERRA_AOD/scrubbed_daily_data --mode=ref 
--pattern=*.nc | header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=500 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=MOD08_D3_6_Aerosol_Optical_Depth_Land_Ocean_Mean,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp
 --bufferSize=1000000 --remoteReplyTimeout=1300000 | add-id: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/generate-tile-id.groovy
 | set
 -dataset-name: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy
 --variables='datasetname=MODIS_TERRA_AOD_500' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6
 --cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"
+
+stream create --name ingest-modis-terra-aod-16 --definition "file 
--dir=/usr/local/data/nexus/MODIS_TERRA_AOD/scrubbed_daily_data --mode=ref 
--pattern=*.nc | header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=16 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=MOD08_D3_6_Aerosol_Optical_Depth_Land_Ocean_Mean,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp
 --bufferSize=1000000 --remoteReplyTimeout=1300000 | add-id: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/generate-tile-id.groovy
 | set-d
 ataset-name: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy
 --variables='datasetname=MODIS_TERRA_AOD_16' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6
 --cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-base/xd-container-logback.groovy
----------------------------------------------------------------------
diff --git a/docker/ingest-base/xd-container-logback.groovy 
b/docker/ingest-base/xd-container-logback.groovy
new file mode 100644
index 0000000..c5a3e11
--- /dev/null
+++ b/docker/ingest-base/xd-container-logback.groovy
@@ -0,0 +1,83 @@
+import org.springframework.xd.dirt.util.logging.CustomLoggerConverter
+import org.springframework.xd.dirt.util.logging.VersionPatternConverter
+import ch.qos.logback.classic.encoder.PatternLayoutEncoder
+import ch.qos.logback.core.rolling.RollingFileAppender
+import ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy
+
+// We highly recommend that you always add a status listener just
+// after the last import statement and before all other statements
+// NOTE - this includes logging configuration in the log and stacktraces in 
the event of errors
+// statusListener(OnConsoleStatusListener)
+
+// Emulates Log4j formatting
+conversionRule("category", CustomLoggerConverter)
+
+//XD Version
+conversionRule("version", VersionPatternConverter)
+
+def ISO8601 = "yyyy-MM-dd'T'HH:mm:ssZ"
+def datePattern = ISO8601
+
+appender("STDOUT", ConsoleAppender) {
+       encoder(PatternLayoutEncoder) {
+               pattern = "%d{${datePattern}} %version %level{5} %thread 
%category{2} - %msg%n"
+       }
+}
+
+def logfileNameBase = 
"${System.getProperty('xd.home')}/logs/container-${System.getProperty('PID')}"
+
+appender("FILE", RollingFileAppender) {
+       file = "${logfileNameBase}.log"
+       append = true
+       rollingPolicy(TimeBasedRollingPolicy) {
+               fileNamePattern = "${logfileNameBase}-%d{yyyy-MM-dd}.%i.log"
+               timeBasedFileNamingAndTriggeringPolicy(SizeAndTimeBasedFNATP) {
+                       maxFileSize = "100MB"
+               }
+               maxHistory = 30
+       }
+
+       encoder(PatternLayoutEncoder) {
+               pattern = "%d{${datePattern}} %version %level{5} %thread 
%category{2} - %msg%n"
+       }
+}
+
+root(WARN, ["STDOUT", "FILE"])
+
+logger("org.nasa", DEBUG)
+logger("org.springframework.scheduling.concurrent", DEBUG, ["FILE"], false)
+
+logger("org.springframework.xd", WARN)
+logger("org.springframework.xd.dirt.server", INFO)
+logger("org.springframework.xd.dirt.util.XdConfigLoggingInitializer", INFO)
+logger("xd.sink", INFO)
+logger("org.springframework.xd.sqoop", INFO)
+
+logger("org.springframework", WARN)
+logger("org.springframework.boot", WARN)
+logger("org.springframework.integration", WARN)
+logger("org.springframework.retry", WARN)
+logger("org.springframework.amqp", WARN)
+
+logger("org.nasa.ingest.tcpshell", INFO)
+
+//This prevents the "Error:KeeperErrorCode = NodeExists" INFO messages
+//logged by ZooKeeper when a parent node does not exist while
+//invoking Curator's creatingParentsIfNeeded node builder.
+logger("org.apache.zookeeper.server.PrepRequestProcessor", WARN)
+
+// This prevents the WARN level about a non-static, @Bean method in Spring 
Batch that is irrelevant
+logger("org.springframework.context.annotation.ConfigurationClassEnhancer", 
ERROR)
+
+// This prevents boot LoggingApplicationListener logger's misleading warning 
message
+logger("org.springframework.boot.logging.LoggingApplicationListener", ERROR)
+
+// This prevents Hadoop configuration warnings
+logger("org.apache.hadoop.conf.Configuration", ERROR)
+
+
+//This is for the throughput-sampler sink module
+logger("org.springframework.xd.integration.throughput", INFO)
+
+// Suppress json-path warning until SI 4.2 is released
+logger("org.springframework.integration.config.IntegrationRegistrar", ERROR)

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-base/xd-singlenode-logback.groovy
----------------------------------------------------------------------
diff --git a/docker/ingest-base/xd-singlenode-logback.groovy 
b/docker/ingest-base/xd-singlenode-logback.groovy
new file mode 100644
index 0000000..5aec45b
--- /dev/null
+++ b/docker/ingest-base/xd-singlenode-logback.groovy
@@ -0,0 +1,91 @@
+/*****************************************************************************
+* Copyright (c) 2016 Jet Propulsion Laboratory,
+* California Institute of Technology.  All rights reserved
+*****************************************************************************/
+import org.springframework.xd.dirt.util.logging.CustomLoggerConverter
+import org.springframework.xd.dirt.util.logging.VersionPatternConverter
+import ch.qos.logback.classic.encoder.PatternLayoutEncoder
+import ch.qos.logback.core.rolling.RollingFileAppender
+
+// We highly recommend that you always add a status listener just
+// after the last import statement and before all other statements
+// NOTE - this includes logging configuration in the log and stacktraces in 
the event of errors
+// statusListener(OnConsoleStatusListener)
+
+// Emulates Log4j formatting
+conversionRule("category", CustomLoggerConverter)
+
+//XD Version
+conversionRule("version", VersionPatternConverter)
+
+def ISO8601 = "yyyy-MM-dd'T'HH:mm:ssZ"
+def datePattern = ISO8601
+
+appender("STDOUT", ConsoleAppender) {
+       encoder(PatternLayoutEncoder) {
+               pattern = "%d{${datePattern}} %version %level{5} %thread 
%category{2} - %msg%n"
+       }
+}
+
+def logfileNameBase = 
"${System.getProperty('xd.home')}/logs/singlenode-${System.getProperty('PID')}"
+
+appender("FILE", RollingFileAppender) {
+       file = "${logfileNameBase}.log"
+       append = false
+       rollingPolicy(TimeBasedRollingPolicy) {
+               fileNamePattern = "${logfileNameBase}-%d{yyyy-MM-dd}.%i.log"
+               timeBasedFileNamingAndTriggeringPolicy(SizeAndTimeBasedFNATP) {
+                       maxFileSize = "100KB"
+               }
+       }
+
+       encoder(PatternLayoutEncoder) {
+               pattern = "%d{${datePattern}} %version %level{5} %thread 
%category{2} - %msg%n"
+       }
+}
+
+root(WARN, ["STDOUT", "FILE"])
+
+logger("org.nasa", INFO)
+logger("org.springframework.xd", WARN)
+logger("org.springframework.xd.dirt.server", INFO)
+logger("org.springframework.xd.dirt.util.XdConfigLoggingInitializer", INFO)
+logger("xd.sink", INFO)
+logger("org.springframework.xd.sqoop", INFO)
+// This is for the throughput-sampler sink module
+logger("org.springframework.xd.integration.throughput", INFO)
+
+logger("org.springframework", WARN)
+logger("org.springframework.boot", WARN)
+logger("org.springframework.integration", WARN)
+logger("org.springframework.retry", WARN)
+logger("org.springframework.amqp", WARN)
+
+// Below this line are specific settings for things that are too noisy
+logger("org.springframework.beans.factory.config", ERROR)
+logger("org.springframework.amqp.rabbit.listener.SimpleMessageListenerContainer",
 ERROR)
+
+// This prevents the WARN level InstanceNotFoundException: 
org.apache.ZooKeeperService:name0=StandaloneServer_port-1
+logger("org.apache.zookeeper.jmx.MBeanRegistry", ERROR)
+
+
+// This prevents the WARN level about a non-static, @Bean method in Spring 
Batch that is irrelevant
+logger("org.springframework.context.annotation.ConfigurationClassEnhancer", 
ERROR)
+
+// This prevents the "Error:KeeperErrorCode = NodeExists" INFO messages
+// logged by ZooKeeper when a parent node does not exist while
+// invoking Curator's creatingParentsIfNeeded node builder.
+logger("org.apache.zookeeper.server.PrepRequestProcessor", WARN)
+
+
+// This prevents boot LoggingApplicationListener logger's misleading warning 
message
+logger("org.springframework.boot.logging.LoggingApplicationListener", ERROR)
+
+
+
+// This prevents Hadoop configuration warnings
+logger("org.apache.hadoop.conf.Configuration", ERROR)
+
+// Suppress json-path warning until SI 4.2 is released
+logger("org.springframework.integration.config.IntegrationRegistrar", ERROR)
+

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-container/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/ingest-container/Dockerfile 
b/docker/ingest-container/Dockerfile
new file mode 100644
index 0000000..873a800
--- /dev/null
+++ b/docker/ingest-container/Dockerfile
@@ -0,0 +1,5 @@
+FROM nexusjpl/ingest-base
+
+USER springxd
+ENTRYPOINT ["/usr/local/nexus-ingest.sh"]
+CMD ["--container"]
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-container/README.md
----------------------------------------------------------------------
diff --git a/docker/ingest-container/README.md 
b/docker/ingest-container/README.md
new file mode 100644
index 0000000..ee17bc3
--- /dev/null
+++ b/docker/ingest-container/README.md
@@ -0,0 +1,74 @@
+# ingest-container Docker
+
+This can be used to start spring-xd as a container for use in distributed mode 
with all nexus modules already installed.
+
+# Docker Compose
+
+Use the [docker-compose.yml](docker-compose.yml) file to start up a container 
and place it on the same network as services started from 
nexusjpl/ingest-admin. Example:
+
+    MYSQL_PASSWORD=admin ZK_HOST_IP=10.200.10.1 KAFKA_HOST_IP=10.200.10.1 
docker-compose up
+
+`MYSQL_PASSWORD` must match the password used for the user called `xd` when 
the MySQL database was initialized.
+`ZK_HOST_IP` must be set to a valid IP address of a zookeeper host that will 
be used to manage Spring XD.
+`KAFKA_HOST_IP` must be set to a valid IP address of a kafka broker that will 
be used for the transport layer of Spring XD
+
+# Docker Run
+
+This container relies on 5 external services that must already be running: 
nexusjpl/ingest-admin, MySQL, Redis, Zookeeper, and Kafka.
+
+To start the server use:
+
+    docker run -it \
+    -e "MYSQL_PORT_3306_TCP_ADDR=mysqldb" -e "MYSQL_PORT_3306_TCP_PORT=3306" \
+    -e "MYSQL_USER=xd" -e "MYSQL_PASSWORD=admin" \
+    -e "REDIS_ADDR=redis" -e "REDIS_PORT=6397" \
+    -e "ZOOKEEPER_CONNECT=zkhost:2181" -e "ZOOKEEPER_XD_CHROOT=springxd" \
+    -e "KAFKA_BROKERS=kafka1:9092" -e "KAFKA_ZKADDRESS=zkhost:2181/kafka"
+    --add-host="zkhost:10.200.10.1" \
+    --add-host="kafka1:10.200.10.1"
+    --network container:ingest-admin
+    --name xd-admin nexusjpl/ingest-container
+
+This mode requires a number of Environment Variables to be defined.
+
+#####  `MYSQL_PORT_3306_TCP_ADDR`
+
+Address to a running MySQL service
+
+#####  `MYSQL_PORT_3306_TCP_PORT`
+
+Port for running MySQL service
+
+#####  `MYSQL_USER`
+
+Username to connect to MySQL service
+
+#####  `MYSQL_PASSWORD`
+
+Password for connecting to MySQL service
+
+#####  `ZOOKEEPER_CONNECT`
+
+Zookeeper connect string. Can be a comma-delimited list of host:port values.
+
+#####  `ZOOKEEPER_XD_CHROOT`
+
+Zookeeper root node for spring-xd
+
+#####  `REDIS_ADDR`
+
+Address to a running Redis service
+
+#####  `REDIS_PORT`
+
+Port for running Redis service
+
+#####  `KAFKA_BROKERS`
+
+Comma-delimited list of host:port values which define the list of Kafka 
brokers used for transport.
+
+#####  `KAFKA_ZKADDRESS`
+
+Specifies the ZooKeeper connection string in the form hostname:port where host 
and port are the host and port of a ZooKeeper server.  
+
+The server may also have a ZooKeeper chroot path as part of its ZooKeeper 
connection string which puts its data under some path in the global ZooKeeper 
namespace. If so the consumer should use the same chroot path in its connection 
string. For example to give a chroot path of `/chroot/path` you would give the 
connection string as 
`hostname1:port1,hostname2:port2,hostname3:port3/chroot/path`.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-container/docker-compose.yml
----------------------------------------------------------------------
diff --git a/docker/ingest-container/docker-compose.yml 
b/docker/ingest-container/docker-compose.yml
new file mode 100644
index 0000000..8800aaf
--- /dev/null
+++ b/docker/ingest-container/docker-compose.yml
@@ -0,0 +1,44 @@
+version: '3'
+
+networks:
+  ingestadmin_ingestnetwork:
+      external: true
+  nexus:
+      external: true
+
+volumes:
+  data-volume:
+
+services:
+
+    xd-container:
+        image: nexusjpl/ingest-container:1
+        container_name: xd-container
+        command: [-c]
+        environment:
+            - MYSQL_PORT_3306_TCP_ADDR=mysqldb
+            - MYSQL_PORT_3306_TCP_PORT=3306
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD
+            - REDIS_ADDR=redis
+            - REDIS_PORT=6379
+            - "ZOOKEEPER_CONNECT=zkhost:2181"
+            - ZOOKEEPER_XD_CHROOT=springxd
+            - "KAFKA_BROKERS=kafka1:9092"
+            - "KAFKA_ZKADDRESS=zkhost:2181/kafka"
+        external_links:
+            - mysqldb
+            - redis
+        extra_hosts:
+            - "zkhost:$ZK_HOST_IP"
+            - "kafka1:$KAFKA_HOST_IP"
+        networks:
+            - default
+            - ingestadmin_ingestnetwork
+            - nexus
+        volumes:
+              - data-volume:/usr/local/data/nexus
+        deploy:
+            placement:
+                constraints:
+                    - node.labels.nexus.type == ingest

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-singlenode/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/ingest-singlenode/Dockerfile 
b/docker/ingest-singlenode/Dockerfile
new file mode 100644
index 0000000..70da4fe
--- /dev/null
+++ b/docker/ingest-singlenode/Dockerfile
@@ -0,0 +1,5 @@
+FROM nexusjpl/ingest-base
+
+USER springxd
+ENTRYPOINT ["/usr/local/nexus-ingest.sh"]
+CMD ["--singleNode"]
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/ingest-singlenode/README.md
----------------------------------------------------------------------
diff --git a/docker/ingest-singlenode/README.md 
b/docker/ingest-singlenode/README.md
new file mode 100644
index 0000000..d263618
--- /dev/null
+++ b/docker/ingest-singlenode/README.md
@@ -0,0 +1,27 @@
+# ingest-singlenode Docker
+
+This can be used to start spring-xd in singlenode mode with all nexus modules 
already installed.
+
+# Singlenode Mode
+
+To start the server in singleNode mode use:
+
+    docker run -it -v ~/data/:/usr/local/data/nexus -p 9393:9393 --name 
nexus-ingest nexusjpl/ingest-singlenode
+
+This starts a singleNode instance of Spring XD with a data volume mounted to 
the host machine's home directory to be used for ingestion. It also exposes the 
Admin UI on port 9393 of the host machine.
+
+You can then connect to the Admin UI with http://localhost:9393/admin-ui.
+
+# XD Shell
+
+## Using Docker Exec
+
+Once the nexus-ingest container is running you can use docker exec to start an 
XD Shell that communicates with the singlenode server:
+
+    docker exec -it nexus-ingest xd-shell
+
+## Using Standalone Container
+
+You can use the springxd shell docker image to start a separate container 
running XD shell connected to the singlenode server:
+
+    docker run -it --network container:nexus-ingest springxd/shell
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/kafka/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/kafka/Dockerfile b/docker/kafka/Dockerfile
new file mode 100644
index 0000000..ca62ee3
--- /dev/null
+++ b/docker/kafka/Dockerfile
@@ -0,0 +1,26 @@
+FROM centos:7
+
+RUN yum -y update && \
+    yum -y install wget
+
+# Install Oracle JDK 1.8u121-b13
+RUN wget -q --no-cookies --no-check-certificate --header "Cookie: 
gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; 
oraclelicense=accept-securebackup-cookie" 
"http://download.oracle.com/otn-pub/java/jdk/8u121-b13/e9e7ea248e2c4826b92b3f075a80e441/jdk-8u121-linux-x64.rpm";
 && \
+    yum -y install jdk-8u121-linux-x64.rpm && \
+    rm jdk-8u121-linux-x64.rpm
+ENV JAVA_HOME /usr/java/default
+
+# Install Kafka
+RUN groupadd -r kafka && useradd -r -g kafka kafka
+WORKDIR /usr/local/kafka
+RUN wget -q http://apache.claz.org/kafka/0.9.0.1/kafka_2.11-0.9.0.1.tgz && \
+    tar -xvzf kafka_2.11-0.9.0.1.tgz && \
+    ln -s kafka_2.11-0.9.0.1 current && \
+    rm -f kafka_2.11-0.9.0.1.tgz && \
+    chown -R kafka:kafka kafka_2.11-0.9.0.1
+
+ENV PATH $PATH:/usr/local/kafka/current/bin
+    
+USER kafka
+COPY kafka.properties /usr/local/kafka/current/config/
+
+ENTRYPOINT ["kafka-server-start.sh"]

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/kafka/README.md
----------------------------------------------------------------------
diff --git a/docker/kafka/README.md b/docker/kafka/README.md
new file mode 100644
index 0000000..49142fb
--- /dev/null
+++ b/docker/kafka/README.md
@@ -0,0 +1,19 @@
+
+
+This Docker container runs Apache Kafka 2.11-0.9.0.1 on CentOs 7 with Oracle 
jdk-8u121-linux-x64.
+
+The easiest way to run it is:
+
+    docker run -it --add-host="zkhost:10.200.10.1" -p 9092:9092 nexusjpl/kafka
+
+The default command when running this container is the `kafka-server-start.sh` 
script using the `/usr/local/kafka/current/config/server.properties` 
configuration file. 
+
+By default, the server.properties file is configured to connect to zookeeper 
as such:
+
+    zookeeper.connect=zkhost:2181/kafka
+
+So by specifying `--add-host="zkhost:10.200.10.1"` with a valid IP address to 
a zookeeper node, Kafka will be able to connect to an existing cluster.
+
+If you need to override any of the configuration you can use:
+
+    docker run -it --add-host="zkhost:10.200.10.1" nexusjpl/kafka 
kafka-server-start.sh /usr/local/kafka/current/config/server.properties 
--override property=value
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/kafka/docker-compose.yml
----------------------------------------------------------------------
diff --git a/docker/kafka/docker-compose.yml b/docker/kafka/docker-compose.yml
new file mode 100644
index 0000000..b01c4ff
--- /dev/null
+++ b/docker/kafka/docker-compose.yml
@@ -0,0 +1,53 @@
+version: '3'
+
+networks:
+  nexus:
+      external: true
+      
+      
+services:
+    
+    kafka1:
+        image: nexusjpl/kafka
+        container_name: kafka1
+        command: ["/usr/local/kafka/current/config/kafka1.properties"]
+        extra_hosts:
+            - "zkhost1:$ZK_HOST1_IP"
+            - "zkhost2:$ZK_HOST2_IP"
+            - "zkhost3:$ZK_HOST3_IP"
+        networks:
+            - nexus
+        deploy:
+            placement:
+              constraints:
+                - node.labels.nexus.type == kafka
+        
+    kafka2:
+        image: nexusjpl/kafka
+        container_name: kafka2
+        command: ["/usr/local/kafka/current/config/kafka2.properties"]
+        extra_hosts:
+            - "zkhost1:$ZK_HOST1_IP"
+            - "zkhost2:$ZK_HOST2_IP"
+            - "zkhost3:$ZK_HOST3_IP"
+        networks:
+            - nexus
+        deploy:
+            placement:
+              constraints:
+                - node.labels.nexus.type == kafka
+        
+    kafka3:
+        image: nexusjpl/kafka
+        container_name: kafka3
+        command: ["/usr/local/kafka/current/config/kafka3.properties"]
+        extra_hosts:
+            - "zkhost1:$ZK_HOST1_IP"
+            - "zkhost2:$ZK_HOST2_IP"
+            - "zkhost3:$ZK_HOST3_IP"
+        networks:
+            - nexus
+        deploy:
+            placement:
+              constraints:
+                - node.labels.nexus.type == kafka
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/kafka/kafka.properties
----------------------------------------------------------------------
diff --git a/docker/kafka/kafka.properties b/docker/kafka/kafka.properties
new file mode 100644
index 0000000..44d2794
--- /dev/null
+++ b/docker/kafka/kafka.properties
@@ -0,0 +1,131 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# see kafka.server.KafkaConfig for additional details and defaults
+
+############################# Server Basics #############################
+
+# Override this on command line startup
+# e.g. --override broker.id=1
+#broker.id=1
+
+# The maximum size of a message that the server can receive. It is important 
that this property
+# be in sync with the maximum fetch size your consumers use, or else an unruly 
producer
+# will be able to publish messages too large for consumers to consume.
+message.max.bytes=2048576
+
+# The number of bytes of messages to attempt to fetch for each partition in the 
fetch requests the replicas send to the leader.
+replica.fetch.max.bytes=2048576
+
+############################# Socket Server Settings 
#############################
+
+listeners=PLAINTEXT://:9092
+
+# The port the socket server listens on
+#port=9092
+
+# Hostname the broker will bind to. If not set, the server will bind to all 
interfaces
+#host.name=localhost
+
+# Hostname the broker will advertise to producers and consumers. If not set, 
it uses the
+# value for "host.name" if configured.  Otherwise, it will use the value 
returned from
+# java.net.InetAddress.getCanonicalHostName().
+#advertised.host.name=<hostname routable by clients>
+
+# The port to publish to ZooKeeper for clients to use. If this is not set,
+# it will publish the same port that the broker binds to.
+#advertised.port=<port accessible by clients>
+
+# The number of threads handling network requests
+num.network.threads=3
+
+# The number of threads doing disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection 
against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma-separated list of directories under which to store log files
+log.dirs=/tmp/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at 
startup and flushing at shutdown.
+# This value is recommended to be increased for installations with data dirs 
located in RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only 
fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data 
to disk.
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using 
replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when 
the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a 
small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data 
after a period of time or
+# every N messages (or both). This can be done globally and overridden on a 
per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy 
#############################
+
+# The following configurations control the disposal of log segments. The 
policy can
+# be set to delete segments after a period of time, or after a given size has 
accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. 
Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log as 
long as the remaining
+# segments don't drop below log.retention.bytes.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log 
segment will be created.
+log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted 
according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a list of comma-separated host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=zkhost1:2181,zkhost2:2181,zkhost3:2181/kafka
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=10000
+zookeeper.session.timeout.ms=10000
+zookeeper.sync.time.ms=4000
+

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/nexus-cluster.yml
----------------------------------------------------------------------
diff --git a/docker/nexus-cluster.yml b/docker/nexus-cluster.yml
new file mode 100644
index 0000000..d3fdba1
--- /dev/null
+++ b/docker/nexus-cluster.yml
@@ -0,0 +1,251 @@
+version: '3'
+
+networks:
+    nexus:
+        external: true
+      
+volumes:
+    kafka1-logs:
+        driver: local
+    kafka2-logs:
+        driver: local
+    kafka3-logs:
+        driver: local
+
+services:
+
+    mysqldb:
+        image: mysql:8
+        command: [--character-set-server=latin1, 
--collation-server=latin1_swedish_ci]
+        hostname: mysqldb
+        environment:
+            - MYSQL_RANDOM_ROOT_PASSWORD=yes
+            - MYSQL_DATABASE=xdjob
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD=admin
+        networks:
+            - nexus
+        ports:
+            - "3306"
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-admin == true
+                    
+    redis:
+        image: redis:3
+        hostname: redis
+        networks:
+            - nexus
+        ports:
+            - "6379"
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-admin == true
+                    
+    xd-admin:
+        image: nexusjpl/ingest-admin
+        hostname: xd-admin
+        depends_on:
+            - "mysqldb"
+            - "redis"
+        networks:
+            - nexus
+        ports:
+          - "9393:9393"
+        environment:
+            - MYSQL_PORT_3306_TCP_ADDR=mysqldb
+            - MYSQL_PORT_3306_TCP_PORT=3306
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD=admin
+            - REDIS_ADDR=redis
+            - REDIS_PORT=6379
+            - ZOOKEEPER_CONNECT=zk1:2181,zk2:2181,zk3:2181
+            - ZOOKEEPER_XD_CHROOT=springxd
+            - KAFKA_BROKERS=kafka1:9092,kafka2:9092,kafka3:9092
+            - KAFKA_ZKADDRESS=zk1:2181,zk2:2181,zk3:2181/kafka
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-admin == true
+  
+    xd-container:
+        image: nexusjpl/ingest-container
+        depends_on:
+            - "xd-admin"
+        networks:
+            - nexus
+        volumes:
+            - /efs/data/share/datasets:/usr/local/data/nexus/
+        environment:
+            - MYSQL_PORT_3306_TCP_ADDR=mysqldb
+            - MYSQL_PORT_3306_TCP_PORT=3306
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD=admin
+            - REDIS_ADDR=redis
+            - REDIS_PORT=6379
+            - ZOOKEEPER_CONNECT=zk1:2181,zk2:2181,zk3:2181
+            - ZOOKEEPER_XD_CHROOT=springxd
+            - KAFKA_BROKERS=kafka1:9092,kafka2:9092,kafka3:9092
+            - KAFKA_ZKADDRESS=zk1:2181,zk2:2181,zk3:2181/kafka
+        deploy:
+            mode: global
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 5
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest == true
+
+    zk1:
+        image: zookeeper
+        hostname: zk1
+        networks:
+            - nexus
+        volumes:
+            - /data/zk1/data:/data
+            - /data/zk1/datalog:/datalog
+        environment:
+            - ZOO_MY_ID=1
+            - "ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888"
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-msg == true
+                    - node.labels.nexus.zoo.id == 1
+    zk2:
+        image: zookeeper
+        hostname: zk2
+        networks:
+            - nexus
+        volumes:
+            - /data/zk2/data:/data
+            - /data/zk2/datalog:/datalog
+        environment:
+            - ZOO_MY_ID=2
+            - "ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888"
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-msg == true
+                    - node.labels.nexus.zoo.id == 2
+                    
+    zk3:
+        image: zookeeper
+        hostname: zk3
+        networks:
+            - nexus
+        volumes:
+            - /data/zk3/data:/data
+            - /data/zk3/datalog:/datalog
+        environment:
+            - ZOO_MY_ID=3
+            - "ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888"
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-msg == true
+                    - node.labels.nexus.zoo.id == 3
+                    
+    kafka1:
+        image: nexusjpl/kafka
+        command: ["/usr/local/kafka/current/config/kafka.properties", 
"--override", "zookeeper.connect=zk1:2181,zk2:2181,zk3:2181/kafka", 
"--override", "broker.id=1"]
+        hostname: kafka1
+        depends_on:
+            - "zk1"
+            - "zk2"
+            - "zk3"
+        networks:
+            - nexus
+        volumes:
+            - kafka1-logs:/tmp/kafka-logs
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-msg == true
+                    - node.labels.nexus.kafka.id == 1
+                    
+    kafka2:
+        image: nexusjpl/kafka
+        command: ["/usr/local/kafka/current/config/kafka.properties", 
"--override", "zookeeper.connect=zk1:2181,zk2:2181,zk3:2181/kafka", 
"--override", "broker.id=2"]
+        hostname: kafka2
+        depends_on:
+            - "zk1"
+            - "zk2"
+            - "zk3"
+        networks:
+            - nexus
+        volumes:
+            - kafka2-logs:/tmp/kafka-logs
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-msg == true
+                    - node.labels.nexus.kafka.id == 2
+                    
+    kafka3:
+        image: nexusjpl/kafka
+        command: ["/usr/local/kafka/current/config/kafka.properties", 
"--override", "zookeeper.connect=zk1:2181,zk2:2181,zk3:2181/kafka", 
"--override", "broker.id=3"]
+        hostname: kafka3
+        depends_on:
+            - "zk1"
+            - "zk2"
+            - "zk3"
+        networks:
+            - nexus
+        volumes:
+            - kafka3-logs:/tmp/kafka-logs
+        deploy:
+            restart_policy:
+                condition: any
+                delay: 5s
+                max_attempts: 3
+                window: 120s
+            placement:
+                constraints:
+                    - node.labels.nexus.ingest-msg == true
+                    - node.labels.nexus.kafka.id == 3
+        
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/nexus-webapp/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/nexus-webapp/Dockerfile b/docker/nexus-webapp/Dockerfile
new file mode 100644
index 0000000..c3d61d7
--- /dev/null
+++ b/docker/nexus-webapp/Dockerfile
@@ -0,0 +1,20 @@
+# Run example: docker run -it --net=host -p 8083:8083 -e 
MASTER=mesos://127.0.0.1:5050 nexus-webapp
+
+FROM nexusjpl/spark-mesos-base
+
+MAINTAINER Joseph Jacob "[email protected]"
+
+# Set environment variables.
+
+ENV MASTER=local[1] \
+    SPARK_LOCAL_IP=nexus-webapp
+
+# Run NEXUS webapp.
+
+EXPOSE 8083
+
+WORKDIR /tmp
+
+COPY docker-entrypoint.sh /tmp/docker-entrypoint.sh
+
+ENTRYPOINT ["/tmp/docker-entrypoint.sh"]

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/nexus-webapp/docker-entrypoint.sh
----------------------------------------------------------------------
diff --git a/docker/nexus-webapp/docker-entrypoint.sh 
b/docker/nexus-webapp/docker-entrypoint.sh
new file mode 100755
index 0000000..d8b59a0
--- /dev/null
+++ b/docker/nexus-webapp/docker-entrypoint.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+sed -i "s/server.socket_host.*$/server.socket_host=$SPARK_LOCAL_IP/g" 
/nexus/analysis/webservice/config/web.ini && \
+sed -i 
"s/cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6/$CASSANDRA_CONTACT_POINTS/g"
 /nexus/data-access/nexustiles/config/datastores.ini && \
+sed -i "s/solr1:8983/$SOLR_URL_PORT/g" 
/nexus/data-access/nexustiles/config/datastores.ini
+
+cd /nexus/data-access
+python setup.py install --force
+
+cd /nexus/analysis
+python setup.py install --force
+
+python -m webservice.webapp
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/nexusbase/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/nexusbase/Dockerfile b/docker/nexusbase/Dockerfile
new file mode 100644
index 0000000..7a2d454
--- /dev/null
+++ b/docker/nexusbase/Dockerfile
@@ -0,0 +1,36 @@
+FROM centos:7
+
+WORKDIR /tmp
+
+RUN yum -y update && \
+    yum -y install wget \
+    git \
+    which \
+    bzip2
+
+# Install Oracle JDK 1.8u121-b13
+RUN wget -q --no-cookies --no-check-certificate --header "Cookie: 
gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; 
oraclelicense=accept-securebackup-cookie" 
"http://download.oracle.com/otn-pub/java/jdk/8u121-b13/e9e7ea248e2c4826b92b3f075a80e441/jdk-8u121-linux-x64.rpm";
 && \
+    yum -y install jdk-8u121-linux-x64.rpm && \
+    rm jdk-8u121-linux-x64.rpm
+ENV JAVA_HOME /usr/java/default
+
+# ########################
+# # Apache Maven   #
+# ########################
+ENV M2_HOME /usr/local/apache-maven
+ENV M2 $M2_HOME/bin 
+ENV PATH $PATH:$M2
+
+RUN mkdir $M2_HOME && \
+    wget -q 
http://mirror.stjschools.org/public/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz
 && \
+    tar -xvzf apache-maven-3.3.9-bin.tar.gz -C $M2_HOME --strip-components=1 
&& \
+    rm -f apache-maven-3.3.9-bin.tar.gz
+
+# ########################
+# # Anaconda   #
+# ########################
+RUN wget -q https://repo.continuum.io/archive/Anaconda2-4.3.0-Linux-x86_64.sh 
-O install_anaconda.sh && \
+    /bin/bash install_anaconda.sh -b -p /usr/local/anaconda2 && \
+    rm install_anaconda.sh
+ENV PATH $PATH:/usr/local/anaconda2/bin
+

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/solr-single-node/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/solr-single-node/Dockerfile 
b/docker/solr-single-node/Dockerfile
new file mode 100644
index 0000000..c2b9302
--- /dev/null
+++ b/docker/solr-single-node/Dockerfile
@@ -0,0 +1,14 @@
+FROM nexusjpl/nexus-solr
+MAINTAINER Nga Quach "[email protected]"
+
+USER root
+
+RUN apt-get update && apt-get -y install git && rm -rf /var/lib/apt/lists/*
+
+RUN cd / && git clone https://github.com/dataplumber/nexus.git && cp -r 
/nexus/data-access/config/schemas/solr/nexustiles . && rm -rf /nexus
+
+USER $SOLR_USER
+
+RUN cp -r /nexustiles /opt/solr/server/solr/.
+
+VOLUME ["/opt/solr/server/solr/nexustiles/data"]

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/solr-single-node/README.md
----------------------------------------------------------------------
diff --git a/docker/solr-single-node/README.md 
b/docker/solr-single-node/README.md
new file mode 100644
index 0000000..91df066
--- /dev/null
+++ b/docker/solr-single-node/README.md
@@ -0,0 +1,9 @@
+
+
+This Docker container runs Apache Solr v6.4.2 as a single node with the 
nexustiles collection.
+
+The easiest way to run it is:
+
+    docker run --net=host --name nexus-solr -v 
/home/nexus/solr/data:/opt/solr/server/solr/nexustiles/data -d 
nexusjpl/nexus-solr-single-node
+
+/home/nexus/solr/data is the directory on the host machine where the index 
files will be written.

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/solr/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/solr/Dockerfile b/docker/solr/Dockerfile
new file mode 100644
index 0000000..2f72a9f
--- /dev/null
+++ b/docker/solr/Dockerfile
@@ -0,0 +1,20 @@
+FROM solr:6.4.2
+MAINTAINER Nga Quach "[email protected]"
+
+USER root
+
+RUN cd / && wget 
https://downloads.sourceforge.net/project/jts-topo-suite/jts/1.14/jts-1.14.zip 
&& unzip jts-1.14.zip -d /jts-1.14 && rm jts-1.14.zip
+
+RUN apt-get update && apt-get -y install git && rm -rf /var/lib/apt/lists/*
+
+RUN cd / && git clone https://github.com/dataplumber/nexus.git && cp -r 
/nexus/data-access/config/schemas/solr/nexustiles /tmp/nexustiles && rm -rf 
/nexus
+
+RUN mkdir /solr-home
+
+RUN chown -R $SOLR_USER:$SOLR_USER /solr-home
+
+VOLUME /solr-home
+
+RUN cp /jts-1.14/lib/jts-1.14.jar /opt/solr/server/lib/jts-1.14.jar
+
+RUN cp /jts-1.14/lib/jtsio-1.14.jar /opt/solr/server/lib/jtsio-1.14.jar

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/solr/README.md
----------------------------------------------------------------------
diff --git a/docker/solr/README.md b/docker/solr/README.md
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/spark-mesos-agent/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/spark-mesos-agent/Dockerfile 
b/docker/spark-mesos-agent/Dockerfile
new file mode 100644
index 0000000..5c58fb0
--- /dev/null
+++ b/docker/spark-mesos-agent/Dockerfile
@@ -0,0 +1,13 @@
+# Run example: docker run --net=nexus --name mesos-agent1 
nexusjpl/spark-mesos-agent
+
+FROM nexusjpl/spark-mesos-base
+
+MAINTAINER Joseph Jacob "[email protected]"
+
+# Run a Mesos slave.
+
+WORKDIR ${MESOS_HOME}/build
+
+COPY docker-entrypoint.sh /tmp/docker-entrypoint.sh
+
+ENTRYPOINT ["/tmp/docker-entrypoint.sh"]

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/spark-mesos-agent/docker-entrypoint.sh
----------------------------------------------------------------------
diff --git a/docker/spark-mesos-agent/docker-entrypoint.sh 
b/docker/spark-mesos-agent/docker-entrypoint.sh
new file mode 100755
index 0000000..1ed2c34
--- /dev/null
+++ b/docker/spark-mesos-agent/docker-entrypoint.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+sed -i "s/server.socket_host.*$/server.socket_host=$SPARK_LOCAL_IP/g" 
/nexus/analysis/webservice/config/web.ini && \
+sed -i 
"s/cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6/$CASSANDRA_CONTACT_POINTS/g"
 /nexus/data-access/nexustiles/config/datastores.ini && \
+sed -i "s/solr1:8983/$SOLR_URL_PORT/g" 
/nexus/data-access/nexustiles/config/datastores.ini
+
+cd /nexus/data-access
+python setup.py install --force
+
+cd /nexus/analysis
+python setup.py install --force
+
+${MESOS_HOME}/build/bin/mesos-agent.sh 
--master=${MESOS_MASTER_NAME}:${MESOS_MASTER_PORT} --port=${MESOS_AGENT_PORT} 
--work_dir=${MESOS_WORKDIR} --no-systemd_enable_support --launcher=posix 
--no-switch_user --executor_environment_variables='{ "PYTHON_EGG_CACHE": "/tmp" 
}'
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/spark-mesos-base/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/spark-mesos-base/Dockerfile 
b/docker/spark-mesos-base/Dockerfile
new file mode 100644
index 0000000..04dd4ff
--- /dev/null
+++ b/docker/spark-mesos-base/Dockerfile
@@ -0,0 +1,109 @@
+FROM nexusjpl/nexusbase
+
+MAINTAINER Joseph Jacob "[email protected]"
+
+# Install packages needed for builds
+
+RUN yum install -y gcc python-devel
+
+# Set environment variables.  For Mesos, I used MESOS_VER because MESOS_VERSION
+# is expected to be a logical TRUE/FALSE flag that tells Mesos whether or not 
+# to simply print the version number and exit.
+
+ENV INSTALL_LOC=/usr/local \
+    HADOOP_VERSION=2.7.3 \
+    SPARK_VERSION=2.1.0 \
+    MESOS_VER=1.2.0 \
+    MESOS_MASTER_PORT=5050 \
+    MESOS_AGENT_PORT=5051 \
+    MESOS_WORKDIR=/var/lib/mesos \
+    MESOS_IP=0.0.0.0 \
+    MESOS_MASTER_NAME=mesos-master \
+    PYTHON_EGG_CACHE=/tmp
+
+ENV CONDA_HOME=${INSTALL_LOC}/anaconda2 \
+    MESOS_HOME=${INSTALL_LOC}/mesos-${MESOS_VER} \
+    SPARK_DIR=spark-${SPARK_VERSION} \
+    SPARK_PACKAGE=spark-${SPARK_VERSION}-bin-hadoop2.7 \
+    MESOS_MASTER=mesos://${MESOS_IP}:${MESOS_PORT} \
+    MESOS_PACKAGE=mesos-${MESOS_VER}.tar.gz
+
+ENV SPARK_HOME=${INSTALL_LOC}/${SPARK_DIR} \
+    PYSPARK_DRIVER_PYTHON=${CONDA_HOME}/bin/python \
+    PYSPARK_PYTHON=${CONDA_HOME}/bin/python \
+    PYSPARK_SUBMIT_ARGS="--driver-memory=4g pyspark-shell"
+    
+ENV 
PYTHONPATH=${PYTHONPATH}:${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.10.4-src.zip:${SPARK_HOME}/python/lib/pyspark.zip
 \
+    MESOS_NATIVE_JAVA_LIBRARY=${INSTALL_LOC}/lib/libmesos.so \
+    
+    SPARK_EXECUTOR_URI=${INSTALL_LOC}/${SPARK_PACKAGE}.tgz
+    
+WORKDIR ${INSTALL_LOC}
+
+# Set up Spark
+
+RUN wget --quiet http://d3kbcqa49mib13.cloudfront.net/${SPARK_PACKAGE}.tgz && \
+    tar -xzf ${SPARK_PACKAGE}.tgz && \
+    chown -R root.root ${SPARK_PACKAGE} && \
+    ln -s ${SPARK_PACKAGE} ${SPARK_DIR}
+
+# Set up Mesos
+
+COPY install_mesos.sh .
+
+RUN source ./install_mesos.sh && \
+    mkdir ${MESOS_WORKDIR}
+
+# Set up Anaconda environment
+    
+ENV PATH=${CONDA_HOME}/bin:${PATH}:${HADOOP_HOME}/bin:${SPARK_HOME}/bin
+
+RUN conda install -c conda-forge -y netCDF4 && \
+    conda install -y numpy cython mpld3 scipy basemap gdal matplotlib && \
+    pip install shapely cassandra-driver==3.5.0 && \
+    conda install -c conda-forge backports.functools_lru_cache=1.3
+
+# Workaround missing libcom_err.so (needed for gdal)
+
+RUN cd /usr/lib64 && ln -s libcom_err.so.2 libcom_err.so.3
+
+# Workaround missing conda libs needed for gdal
+
+RUN cd ${CONDA_HOME}/lib && \
+    ln -s libnetcdf.so.11 libnetcdf.so.7 && \
+    ln -s libkea.so.1.4.6 libkea.so.1.4.5 && \
+    ln -s libhdf5_cpp.so.12 libhdf5_cpp.so.10 && \
+    ln -s libjpeg.so.9 libjpeg.so.8
+
+RUN yum install -y mesa-libGL.x86_64
+
+# Retrieve NEXUS code and build it.
+
+WORKDIR /
+
+RUN git clone https://github.com/dataplumber/nexus.git
+
+RUN sed -i 's/,webservice.algorithms.doms//g' 
/nexus/analysis/webservice/config/web.ini && \
+    sed -i 's/127.0.0.1/nexus-webapp/g' 
/nexus/analysis/webservice/config/web.ini && \
+    sed -i 
's/127.0.0.1/cassandra1,cassandra2,cassandra3,cassandra4,cassandra5,cassandra6/g'
 /nexus/data-access/nexustiles/config/datastores.ini && \
+    sed -i 's/localhost:8983/solr1:8983/g' 
/nexus/data-access/nexustiles/config/datastores.ini
+
+WORKDIR /nexus/nexus-ingest/nexus-messages
+
+RUN ./gradlew clean build install
+
+WORKDIR /nexus/nexus-ingest/nexus-messages/build/python/nexusproto
+
+RUN python setup.py install
+
+WORKDIR /nexus/data-access
+
+RUN python setup.py install
+
+WORKDIR /nexus/analysis
+
+RUN python setup.py install
+
+WORKDIR /tmp
+
+CMD ["/bin/bash"]

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/spark-mesos-base/install_mesos.sh
----------------------------------------------------------------------
diff --git a/docker/spark-mesos-base/install_mesos.sh 
b/docker/spark-mesos-base/install_mesos.sh
new file mode 100644
index 0000000..65a647e
--- /dev/null
+++ b/docker/spark-mesos-base/install_mesos.sh
@@ -0,0 +1,49 @@
+# Install a few utility tools
+yum install -y tar wget git
+
+# Fetch the Apache Maven repo file.
+wget 
http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo 
-O /etc/yum.repos.d/epel-apache-maven.repo
+
+# Install the EPEL repo so that we can pull in 'libserf-1' as part of our
+# subversion install below.
+yum install -y epel-release
+
+# 'Mesos > 0.21.0' requires 'subversion > 1.8' devel package,
+# which is not available in the default repositories.
+# Create a WANdisco SVN repo file to install the correct version:
+bash -c 'cat > /etc/yum.repos.d/wandisco-svn.repo <<EOF
+[WANdiscoSVN]
+name=WANdisco SVN Repo 1.9
+enabled=1
+baseurl=http://opensource.wandisco.com/centos/7/svn-1.9/RPMS/\$basearch/
+gpgcheck=1
+gpgkey=http://opensource.wandisco.com/RPM-GPG-KEY-WANdisco
+EOF'
+
+# Parts of Mesos require systemd in order to operate. However, Mesos
+# only supports versions of systemd that contain the 'Delegate' flag.
+# This flag was first introduced in 'systemd version 218', which is
+# higher than the default version installed by CentOS. Luckily, CentOS
+# 7.1 has a patched 'systemd < 218' that contains the 'Delegate' flag.
+# Explicitly update systemd to this patched version.
+yum update systemd
+
+# Install essential development tools.
+yum groupinstall -y "Development Tools"
+
+# Install other Mesos dependencies.
+yum install -y apache-maven python-devel java-1.8.0-openjdk-devel zlib-devel 
libcurl-devel openssl-devel cyrus-sasl-devel cyrus-sasl-md5 apr-devel 
subversion-devel apr-util-devel
+
+# Retrieve MESOS package
+wget --quiet http://www.apache.org/dist/mesos/${MESOS_VER}/${MESOS_PACKAGE}
+tar -zxf ${MESOS_PACKAGE} -C ${INSTALL_LOC}
+rm -f ${MESOS_PACKAGE}
+
+# Configure and build.
+cd ${MESOS_HOME}
+mkdir build
+cd build
+../configure
+make
+make check
+make install

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/spark-mesos-master/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/spark-mesos-master/Dockerfile 
b/docker/spark-mesos-master/Dockerfile
new file mode 100644
index 0000000..49a298f
--- /dev/null
+++ b/docker/spark-mesos-master/Dockerfile
@@ -0,0 +1,13 @@
+# Run example: docker run --net=nexus --name mesos-master -p 5050:5050 
nexusjpl/spark-mesos-master
+
+FROM nexusjpl/spark-mesos-base
+
+MAINTAINER Joseph Jacob "[email protected]"
+
+EXPOSE ${MESOS_MASTER_PORT}
+
+# Run a Mesos master.
+
+WORKDIR ${MESOS_HOME}/build
+
+CMD ["/bin/bash", "-c", "./bin/mesos-master.sh --ip=${MESOS_IP} 
--hostname=${MESOS_MASTER_NAME} --port=${MESOS_MASTER_PORT} 
--work_dir=${MESOS_WORKDIR}"]

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/zookeeper/Dockerfile
----------------------------------------------------------------------
diff --git a/docker/zookeeper/Dockerfile b/docker/zookeeper/Dockerfile
new file mode 100644
index 0000000..de3f7b1
--- /dev/null
+++ b/docker/zookeeper/Dockerfile
@@ -0,0 +1,27 @@
+FROM java:openjdk-8-jre-alpine
+MAINTAINER Namrata Malarout <[email protected]>
+
+LABEL name="zookeeper" version="3.4.8"
+
+RUN apk add --no-cache wget bash \
+    && mkdir /opt \
+    && wget -q -O - 
http://apache.mirrors.pair.com/zookeeper/zookeeper-3.4.8/zookeeper-3.4.8.tar.gz 
| tar -xzf - -C /opt \
+    && mv /opt/zookeeper-3.4.8 /opt/zookeeper \
+    && cp /opt/zookeeper/conf/zoo_sample.cfg /opt/zookeeper/conf/zoo.cfg \
+    && mkdir -p /tmp/zookeeper
+
+EXPOSE 2181 2182 2183 2888 3888 3889 3890
+
+WORKDIR /opt/zookeeper
+
+VOLUME ["/opt/zookeeper/conf", "/tmp/zookeeper"]
+RUN mkdir /tmp/zookeeper/1
+RUN mkdir /tmp/zookeeper/2
+RUN mkdir /tmp/zookeeper/3
+RUN printf '%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n' 'tickTime=2000' 
'dataDir=/tmp/zookeeper/1' 'clientPort=2182' 'initLimit=5' 'syncLimit=2' 
'server.1=localhost:2888:3888' 'server.2=localhost:2889:3889' 
'server.3=localhost:2890:3890' >> /opt/zookeeper/zoo.cfg
+RUN printf '%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n' 'tickTime=2000' 
'dataDir=/tmp/zookeeper/2' 'clientPort=2182' 'initLimit=5' 'syncLimit=2' 
'server.1=localhost:2888:3888' 'server.2=localhost:2889:3889' 
'server.3=localhost:2890:3890' > /opt/zookeeper/zoo2.cfg
+RUN printf '%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n' 'tickTime=2000' 
'dataDir=/tmp/zookeeper/3' 'clientPort=2183' 'initLimit=5' 'syncLimit=2' 
'server.1=localhost:2888:3888' 'server.2=localhost:2889:3889' 
'server.3=localhost:2890:3890' > /opt/zookeeper/zoo3.cfg
+RUN cd /opt/zookeeper
+RUN cp zoo2.cfg conf/zoo2.cfg
+RUN cp zoo3.cfg conf/zoo3.cfg
+CMD bin/zkServer.sh start zoo.cfg

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/docker/zookeeper/README.md
----------------------------------------------------------------------
diff --git a/docker/zookeeper/README.md b/docker/zookeeper/README.md
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/README.md
----------------------------------------------------------------------
diff --git a/esip-workshop/README.md b/esip-workshop/README.md
new file mode 100644
index 0000000..515c8e6
--- /dev/null
+++ b/esip-workshop/README.md
@@ -0,0 +1,2 @@
+# nexus-esip-workshop
+Materials for Nexus Summer 2017 ESIP workshops

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/docker/analysis/docker-compose.yml
----------------------------------------------------------------------
diff --git a/esip-workshop/docker/analysis/docker-compose.yml 
b/esip-workshop/docker/analysis/docker-compose.yml
new file mode 100644
index 0000000..8347238
--- /dev/null
+++ b/esip-workshop/docker/analysis/docker-compose.yml
@@ -0,0 +1,93 @@
+version: '3'
+
+networks:
+    nexus:
+        external:
+            name: infrastructure_nexus
+
+services:
+
+    mesos-master:
+        image: nexusjpl/spark-mesos-master
+        container_name: mesos-master
+        networks:
+            - nexus
+        ports:
+            - "5050:5050"
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+
+    mesos-agent1:
+        image: nexusjpl/spark-mesos-agent
+        container_name: mesos-agent1
+        depends_on:
+            - mesos-master
+        environment:
+            - CASSANDRA_CONTACT_POINTS=cassandra1,cassandra2,cassandra3
+            - SOLR_URL_PORT=solr1:8983
+        networks:
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+            
+    mesos-agent2:
+        image: nexusjpl/spark-mesos-agent
+        container_name: mesos-agent2
+        depends_on:
+            - mesos-master
+        environment:
+            - CASSANDRA_CONTACT_POINTS=cassandra1,cassandra2,cassandra3
+            - SOLR_URL_PORT=solr1:8983
+        networks:
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+            
+    mesos-agent3:
+        image: nexusjpl/spark-mesos-agent
+        container_name: mesos-agent3
+        depends_on:
+            - mesos-master
+        environment:
+            - CASSANDRA_CONTACT_POINTS=cassandra1,cassandra2,cassandra3
+            - SOLR_URL_PORT=solr1:8983
+        networks:
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+
+    nexus-webapp:
+        image: nexusjpl/nexus-webapp
+        depends_on:
+            - mesos-master
+            - mesos-agent1
+            - mesos-agent2
+            - mesos-agent3
+        container_name: nexus-webapp
+        networks:
+            - nexus
+        ports:
+            - "8083:8083"
+            - "4040:4040"
+        environment:
+            - MASTER=mesos://mesos-master:5050
+            - CASSANDRA_CONTACT_POINTS=cassandra1,cassandra2,cassandra3
+            - SOLR_URL_PORT=solr1:8983
+            - SPARK_LOCAL_IP=nexus-webapp
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/docker/infrastructure/.env
----------------------------------------------------------------------
diff --git a/esip-workshop/docker/infrastructure/.env 
b/esip-workshop/docker/infrastructure/.env
new file mode 100644
index 0000000..77ae944
--- /dev/null
+++ b/esip-workshop/docker/infrastructure/.env
@@ -0,0 +1 @@
+HOST_DATA_DIR=/home/ndeploy/data

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/docker/infrastructure/docker-compose.yml
----------------------------------------------------------------------
diff --git a/esip-workshop/docker/infrastructure/docker-compose.yml 
b/esip-workshop/docker/infrastructure/docker-compose.yml
new file mode 100644
index 0000000..3d631ed
--- /dev/null
+++ b/esip-workshop/docker/infrastructure/docker-compose.yml
@@ -0,0 +1,172 @@
+version: '3'
+
+networks:
+  nexus:
+
+services:
+
+    zk1:
+        image: zookeeper
+        container_name: zk1
+        volumes:
+            - ${HOST_DATA_DIR}/zk1/data:/data
+            - ${HOST_DATA_DIR}/zk1/datalog:/datalog
+        environment:
+            - ZOO_MY_ID=1
+            - ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888
+        networks:
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+
+    zk2:
+        image: zookeeper
+        container_name: zk2
+        volumes:
+            - ${HOST_DATA_DIR}/zk2/data:/data
+            - ${HOST_DATA_DIR}/zk2/datalog:/datalog
+        environment:
+            - ZOO_MY_ID=2
+            - ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888
+        networks:
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+
+    zk3:
+        image: zookeeper
+        container_name: zk3
+        volumes:
+            - ${HOST_DATA_DIR}/zk3/data:/data
+            - ${HOST_DATA_DIR}/zk3/datalog:/datalog
+        environment:
+            - ZOO_MY_ID=3
+            - ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888
+        networks:
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+
+    solr1:
+        image: nexusjpl/nexus-solr
+        depends_on:
+            - zk1
+            - zk2
+            - zk3
+        container_name: solr1
+        volumes:
+            - 
${PWD}/solr-zk-custom-init.sh:/docker-entrypoint-initdb.d/solr-zk-custom-init.sh
+            - ${HOST_DATA_DIR}/solr1:/solr-home
+        environment:
+            - SOLR_HEAP=16g
+            - ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888
+        networks:
+            - nexus
+        logging:
+          options:
+            max-size: "10m"
+            max-file: "3"
+        command: ["solr-foreground", "-c", "-z", 
"zk1:2181,zk2:2181,zk3:2181/solr", "-s", "/solr-home"]
+
+    solr2:
+        image: nexusjpl/nexus-solr
+        depends_on:
+            - zk1
+            - zk2
+            - zk3
+        container_name: solr2
+        volumes:
+            - 
${PWD}/solr-zk-custom-init.sh:/docker-entrypoint-initdb.d/solr-zk-custom-init.sh
+            - ${HOST_DATA_DIR}/solr2:/solr-home
+        environment:
+            - SOLR_HEAP=16g
+            - ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888
+        networks:
+            - nexus
+        logging:
+          options:
+            max-size: "10m"
+            max-file: "3"
+        command: ["solr-foreground", "-c", "-z", 
"zk1:2181,zk2:2181,zk3:2181/solr", "-s", "/solr-home"]
+
+    solr3:
+        image: nexusjpl/nexus-solr
+        depends_on:
+            - zk1
+            - zk2
+            - zk3
+        container_name: solr3
+        volumes:
+            - 
${PWD}/solr-zk-custom-init.sh:/docker-entrypoint-initdb.d/solr-zk-custom-init.sh
+            - ${HOST_DATA_DIR}/solr3:/solr-home
+        environment:
+            - SOLR_HEAP=16g
+            - ZOO_SERVERS=server.1=zk1:2888:3888 server.2=zk2:2888:3888 
server.3=zk3:2888:3888
+        networks:
+            - nexus
+        logging:
+          options:
+            max-size: "10m"
+            max-file: "3"
+        command: ["solr-foreground", "-c", "-z", 
"zk1:2181,zk2:2181,zk3:2181/solr", "-s", "/solr-home"]
+
+    cassandra1:
+        image: cassandra:2.2.8
+        container_name: cassandra1
+        volumes:
+            - ${HOST_DATA_DIR}/cassandra1:/var/lib/cassandra
+        environment:
+            - CASSANDRA_BROADCAST_ADDRESS=cassandra1
+        networks:
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+
+    cassandra2:
+        image: cassandra:2.2.8
+        container_name: cassandra2
+        volumes:
+            - ${HOST_DATA_DIR}/cassandra2:/var/lib/cassandra
+        environment:
+            - CASSANDRA_BROADCAST_ADDRESS=cassandra2
+            - CASSANDRA_SEEDS=cassandra1
+        networks:
+            - nexus
+        depends_on:
+            - cassandra1
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+
+    cassandra3:
+        image: cassandra:2.2.8
+        container_name: cassandra3
+        volumes:
+            - ${HOST_DATA_DIR}/cassandra3:/var/lib/cassandra
+        environment:
+            - CASSANDRA_BROADCAST_ADDRESS=cassandra3
+            - CASSANDRA_SEEDS=cassandra1
+        networks:
+            - nexus
+        depends_on:
+            - cassandra1
+            - cassandra2
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/docker/infrastructure/solr-zk-custom-init.sh
----------------------------------------------------------------------
diff --git a/esip-workshop/docker/infrastructure/solr-zk-custom-init.sh 
b/esip-workshop/docker/infrastructure/solr-zk-custom-init.sh
new file mode 100644
index 0000000..ffb7ce2
--- /dev/null
+++ b/esip-workshop/docker/infrastructure/solr-zk-custom-init.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+ZK="zk1:2181,zk2:2181,zk3:2181"
+if solr zk ls /solr -z $ZK ; then
+    echo "/solr root exists in ZooKeeper. Skip creation of /solr root."
+else
+    echo "Create /solr root path in ZooKeeper"
+    if solr zk mkroot /solr -z $ZK ; then
+        echo "Upload solr.xml to zookeeper"
+        solr zk cp file:/opt/solr/server/solr/solr.xml zk:/solr.xml -z $ZK/solr
+        echo "Clone nexus repo"
+        git clone https://github.com/dataplumber/nexus.git /opt/solr/nexus
+        echo "Upload nexustiles config to zookeeper"
+        solr zk upconfig -n nexustiles -d 
/opt/solr/nexus/data-access/config/schemas/solr/nexustiles/conf -z $ZK/solr
+    fi
+fi

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/docker/ingest/docker-compose.yml
----------------------------------------------------------------------
diff --git a/esip-workshop/docker/ingest/docker-compose.yml 
b/esip-workshop/docker/ingest/docker-compose.yml
new file mode 100644
index 0000000..f5c8aa6
--- /dev/null
+++ b/esip-workshop/docker/ingest/docker-compose.yml
@@ -0,0 +1,192 @@
+version: '3'
+
+networks:
+  ingestnetwork:
+  nexus:
+      external:
+          name: infrastructure_nexus
+
+services:
+
+    mysqldb:
+        image: mysql:8
+        container_name: mysqldb
+        command: ["--character-set-server=latin1", 
"--collation-server=latin1_swedish_ci"]
+        environment:
+            - MYSQL_RANDOM_ROOT_PASSWORD=yes
+            - MYSQL_DATABASE=xdjob
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD=changeit
+        networks:
+            - ingestnetwork
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+    
+    redis:
+        image: redis:3
+        container_name: redis
+        networks:
+            - ingestnetwork
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+            
+    kafka1:
+        image: nexusjpl/kafka
+        container_name: kafka1
+        command: ["/usr/local/kafka/current/config/server.properties", 
"--override", "zookeeper.connect=zk1:2181,zk2:2181,zk3:2181/kafka", 
"--override", "broker.id=1"]
+        networks:
+            - ingestnetwork
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+            
+    kafka2:
+        image: nexusjpl/kafka
+        container_name: kafka2
+        command: ["/usr/local/kafka/current/config/server.properties", 
"--override", "zookeeper.connect=zk1:2181,zk2:2181,zk3:2181/kafka", 
"--override", "broker.id=2"]
+        networks:
+            - ingestnetwork
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+            
+    kafka3:
+        image: nexusjpl/kafka
+        container_name: kafka3
+        command: ["/usr/local/kafka/current/config/server.properties", 
"--override", "zookeeper.connect=zk1:2181,zk2:2181,zk3:2181/kafka", 
"--override", "broker.id=3"]
+        networks:
+            - ingestnetwork
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+            
+    xd-admin:
+        image: nexusjpl/ingest-admin
+        container_name: xd-admin
+        command: [-a]
+        environment:
+            - MYSQL_PORT_3306_TCP_ADDR=mysqldb
+            - MYSQL_PORT_3306_TCP_PORT=3306
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD=changeit
+            - REDIS_ADDR=redis
+            - REDIS_PORT=6379
+            - "ZOOKEEPER_CONNECT=zk1:2181,zk2:2181,zk3:2181"
+            - ZOOKEEPER_XD_CHROOT=springxd
+            - "KAFKA_BROKERS=kafka1:9092,kafka2:9092,kafka3:9092"
+            - "KAFKA_ZKADDRESS=zk1:2181,zk2:2181,zk3:2181/kafka"
+        depends_on:
+            - mysqldb
+            - redis
+            - kafka1
+            - kafka2
+            - kafka3
+        networks:
+            - ingestnetwork
+            - nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+            
+    xd-container1:
+        image: nexusjpl/ingest-container
+        container_name: xd-container1
+        command: [-c]
+        environment:
+            - MYSQL_PORT_3306_TCP_ADDR=mysqldb
+            - MYSQL_PORT_3306_TCP_PORT=3306
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD=changeit
+            - REDIS_ADDR=redis
+            - REDIS_PORT=6379
+            - "ZOOKEEPER_CONNECT=zk1:2181,zk2:2181,zk3:2181"
+            - ZOOKEEPER_XD_CHROOT=springxd
+            - "KAFKA_BROKERS=kafka1:9092,kafka2:9092,kafka3:9092"
+            - "KAFKA_ZKADDRESS=zk1:2181,zk2:2181,zk3:2181/kafka"
+        depends_on:
+            - xd-admin
+        networks:
+            - ingestnetwork
+            - nexus
+        volumes:
+              - /home/ndeploy/ingest/data:/usr/local/data/nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+              
+    xd-container2:
+        image: nexusjpl/ingest-container
+        container_name: xd-container2
+        command: [-c]
+        environment:
+            - MYSQL_PORT_3306_TCP_ADDR=mysqldb
+            - MYSQL_PORT_3306_TCP_PORT=3306
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD=changeit
+            - REDIS_ADDR=redis
+            - REDIS_PORT=6379
+            - "ZOOKEEPER_CONNECT=zk1:2181,zk2:2181,zk3:2181"
+            - ZOOKEEPER_XD_CHROOT=springxd
+            - "KAFKA_BROKERS=kafka1:9092,kafka2:9092,kafka3:9092"
+            - "KAFKA_ZKADDRESS=zk1:2181,zk2:2181,zk3:2181/kafka"
+        depends_on:
+            - xd-admin
+        networks:
+            - ingestnetwork
+            - nexus
+        volumes:
+              - /home/ndeploy/ingest/data:/usr/local/data/nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
+              
+    xd-container3:
+        image: nexusjpl/ingest-container
+        container_name: xd-container3
+        command: [-c]
+        environment:
+            - MYSQL_PORT_3306_TCP_ADDR=mysqldb
+            - MYSQL_PORT_3306_TCP_PORT=3306
+            - MYSQL_USER=xd
+            - MYSQL_PASSWORD=changeit
+            - REDIS_ADDR=redis
+            - REDIS_PORT=6379
+            - "ZOOKEEPER_CONNECT=zk1:2181,zk2:2181,zk3:2181"
+            - ZOOKEEPER_XD_CHROOT=springxd
+            - "KAFKA_BROKERS=kafka1:9092,kafka2:9092,kafka3:9092"
+            - "KAFKA_ZKADDRESS=zk1:2181,zk2:2181,zk3:2181/kafka"
+        depends_on:
+            - xd-admin
+        networks:
+            - ingestnetwork
+            - nexus
+        volumes:
+              - /home/ndeploy/ingest/data:/usr/local/data/nexus
+        logging:
+            driver: "json-file"
+            options:
+                max-size: "10m"
+                max-file: "3"
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/docker/ingest/stream-definitions.txt
----------------------------------------------------------------------
diff --git a/esip-workshop/docker/ingest/stream-definitions.txt 
b/esip-workshop/docker/ingest/stream-definitions.txt
new file mode 100644
index 0000000..df7c2bd
--- /dev/null
+++ b/esip-workshop/docker/ingest/stream-definitions.txt
@@ -0,0 +1,13 @@
+stream create --name ingest-avhrr --definition "scan-for-avhrr-granules: file 
--dir=/usr/local/data/nexus/avhrr --mode=ref --pattern=*.nc --maxMessages=1 
--fixedDelay=1 | header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=1296 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.kelvintocelsius.transform:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=analysed_sst,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp,STORED_VAR_NAME=analysed_sst
 --bufferSize=1000000 --remoteReplyTimeout=360000 | add-id: script 
--script=file:///usr/local/spring-xd/current/xd-nex
 us-shared/generate-tile-id.groovy | set-dataset-name: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy
 --variables='datasetname=AVHRR_OI_L4_GHRSST_NCEI' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3 
--cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"
+
+stream deploy --name ingest-avhrr --properties 
module.python-chain.count=3,module.nexus.count=3
+
+
+stream create --name ingest-avhrr-clim --definition 
"scan-for-avhrr-clim-granules: file --dir=/usr/local/data/nexus/avhrr-clim/ 
--mode=ref --pattern=*.nc --maxMessages=1 --fixedDelay=1 | 
header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=1296 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.kelvintocelsius.transform:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=analysed_sst,META=analysed_sst_std,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp,STORED_VAR_NAME=analysed_sst
 --bufferSize=1000000 --remoteReplyTimeout=360000 | add-id: script --script=file
 :///usr/local/spring-xd/current/xd-nexus-shared/generate-tile-id.groovy | 
add-time: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/add-time-from-granulename.groovy
 --variables='regex=^(\\d{3}),dateformat=DDD' | add-day-atr: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/add-day-of-year-attribute.groovy
 --variables='regex=^(\\d{3})' | set-dataset-name: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy
 --variables='datasetname=AVHRR_OI_L4_GHRSST_NCEI_CLIM' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3 
--cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"
+
+stream deploy --name ingest-avhrr-clim
+
+
+stream create --name ingest-trmm --definition "scan-for-trmm-granules: file 
--dir=/usr/local/data/nexus/trmm/ --mode=ref --pattern=*.nc | 
header-absolutefilepath: header-enricher 
--headers={\"absolutefilepath\":\"payload\"} | dataset-tiler 
--dimensions=lat,lon --tilesDesired=1296 | join-with-static-time: transform 
--expression=\"'time:0:1,'+payload.stream().collect(T(java.util.stream.Collectors).joining(';time:0:1,'))+';file://'+headers['absolutefilepath']\"
 | python-chain: tcpshell --command='python -u -m nexusxd.processorchain' 
--environment=CHAIN=nexusxd.tilereadingprocessor.read_grid_data:nexusxd.emptytilefilter.filter_empty_tiles:nexusxd.tilesumarizingprocessor.summarize_nexustile,VARIABLE=TRMM_3B42_daily_precipitation_V7,LATITUDE=lat,LONGITUDE=lon,TIME=time,READER=GRIDTILE,TEMP_DIR=/tmp
 --bufferSize=1000000 --remoteReplyTimeout=1300000 | add-id: script 
--script=file:///usr/local/spring-xd/current/xd-nexus-shared/generate-tile-id.groovy
 | set-dataset-name: script --script=file:
 ///usr/local/spring-xd/current/xd-nexus-shared/set-dataset-name.groovy 
--variables='datasetname=TRMM_3B42_daily' | nexus 
--cassandraContactPoints=cassandra1,cassandra2,cassandra3 
--cassandraKeyspace=nexustiles 
--solrCloudZkHost=zk1:2181,zk2:2181,zk3:2181/solr --solrCollection=nexustiles 
--cassandraPort=9042"
+
+stream deploy --name ingest-trmm --properties 
module.python-chain.count=3,module.nexus.count=3
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/docker/jupyter/Dockerfile
----------------------------------------------------------------------
diff --git a/esip-workshop/docker/jupyter/Dockerfile 
b/esip-workshop/docker/jupyter/Dockerfile
new file mode 100644
index 0000000..960f51e
--- /dev/null
+++ b/esip-workshop/docker/jupyter/Dockerfile
@@ -0,0 +1,22 @@
+FROM jupyter/scipy-notebook
+
+USER root
+RUN apt-get update && \
+    apt-get install -y git libgeos-dev
+USER jovyan
+
+COPY requirements.txt /tmp
+RUN pip install -r /tmp/requirements.txt && \
+    conda install -y basemap
+
+ENV REBUILD_CODE=true
+RUN mkdir -p /home/jovyan/nexuscli && \
+    cd /home/jovyan/nexuscli && \
+    git init && \
+    git remote add -f origin https://github.com/dataplumber/nexus && \
+    git config core.sparseCheckout true && \
+    echo "client" >> .git/info/sparse-checkout && \
+    git pull origin master && \
+    cd client && \
+    python setup.py install
+

http://git-wip-us.apache.org/repos/asf/incubator-sdap-nexus/blob/ff98fa34/esip-workshop/docker/jupyter/requirements.txt
----------------------------------------------------------------------
diff --git a/esip-workshop/docker/jupyter/requirements.txt 
b/esip-workshop/docker/jupyter/requirements.txt
new file mode 100644
index 0000000..e4f500a
--- /dev/null
+++ b/esip-workshop/docker/jupyter/requirements.txt
@@ -0,0 +1,4 @@
+shapely
+requests
+numpy
+cassandra-driver==3.9.0


Reply via email to