Author: starchmd
Date: Wed Dec 10 18:48:25 2014
New Revision: 1644505
URL: http://svn.apache.org/r1644505
Log:
OODT-699: Improving cluster-management scripts
Added:
oodt/trunk/cluster-tools/setup/hdfs-config/
oodt/trunk/cluster-tools/setup/hdfs-config/core-site.xml
oodt/trunk/cluster-tools/setup/hdfs-config/hdfs-site.xml
Modified:
oodt/trunk/cluster-tools/scripts/shutdown.sh
oodt/trunk/cluster-tools/scripts/start-up.sh
oodt/trunk/cluster-tools/setup/deploy.sh
oodt/trunk/cluster-tools/setup/env-vars.sh.tmpl
oodt/trunk/cluster-tools/setup/install.sh
Modified: oodt/trunk/cluster-tools/scripts/shutdown.sh
URL:
http://svn.apache.org/viewvc/oodt/trunk/cluster-tools/scripts/shutdown.sh?rev=1644505&r1=1644504&r2=1644505&view=diff
==============================================================================
--- oodt/trunk/cluster-tools/scripts/shutdown.sh (original)
+++ oodt/trunk/cluster-tools/scripts/shutdown.sh Wed Dec 10 18:48:25 2014
@@ -42,3 +42,5 @@ if [[ "${SCREEN}" != "" ]]
then
screen -X -S ${SCREEN} quit
fi
+#Shutdown Hadoop HDFS
+${HADOOP_HOME}/sbin/stop-dfs.sh
Modified: oodt/trunk/cluster-tools/scripts/start-up.sh
URL:
http://svn.apache.org/viewvc/oodt/trunk/cluster-tools/scripts/start-up.sh?rev=1644505&r1=1644504&r2=1644505&view=diff
==============================================================================
--- oodt/trunk/cluster-tools/scripts/start-up.sh (original)
+++ oodt/trunk/cluster-tools/scripts/start-up.sh Wed Dec 10 18:48:25 2014
@@ -41,6 +41,8 @@ if [[ "${SCREEN}" != "" ]]
then
initScreen "${SCREEN}"
fi
+#Start Hadoop HDFS
+${HADOOP_HOME}/sbin/start-dfs.sh
#Run all start-up commands
for elem in "${starts[@]}"
@@ -50,7 +52,7 @@ do
host=${2}
cmd=${3}
echo "Running: ${cmd} on ${host} via ${SCREEN:-ssh} (${tab})"
-
+ #Run in screen if set, or ssh if not set
if [[ "${SCREEN}" != "" ]]
then
screenr ${tab} ${host} "${cmd}" || errorAndExit "Could not run ${cmd}
on ${host} via ${SCREEN}"
Modified: oodt/trunk/cluster-tools/setup/deploy.sh
URL:
http://svn.apache.org/viewvc/oodt/trunk/cluster-tools/setup/deploy.sh?rev=1644505&r1=1644504&r2=1644505&view=diff
==============================================================================
--- oodt/trunk/cluster-tools/setup/deploy.sh (original)
+++ oodt/trunk/cluster-tools/setup/deploy.sh Wed Dec 10 18:48:25 2014
@@ -69,19 +69,6 @@ while true; do
* ) echo "Please answer y or n.";;
esac
done
-#TAR=${TMP_DIR}/deploy.tar.gz
-#if [ -e ${TAR} ]
-#then
-# echo "Tar exists, using it."
-#else
-# #Make tar
-# pushd ${INSTALL_DIR} || { \
-# echo "ERROR: Failed to pushd to ${INSTALL_DIR} Exiting for
safety."; \
-# exit;
-# }
-# tar -czf ${TAR} *
-# popd
-#fi
#Put on all hosts
for host in $(cat ${HOSTS_FILE} | grep -v "^#" | tail -n +2 )
do
@@ -92,14 +79,4 @@ do
continue;\
}
rsync -avz ${INSTALL_DIR}/* "${host}:${INSTALL_DIR}"
-# scp ${TAR} ${host}:${INSTALL_DIR} \
-# ||{ \
-# echo "WARNING: Failed to scp ${TAR} to ${host}:${INSTALL_DIR} Deploy
manually."; \
-# continue;\
-# }
-# ssh ${host} "tar -xzf ${INSTALL_DIR}/$(basename ${TAR}) -C
${INSTALL_DIR}" \
-# ||{ \
-# echo "WARNING: Failed to untar $(basename ${TAR}) in
${host}:${INSTALL_DIR} Deploy manually."; \
-# continue;\
-# }
done
Modified: oodt/trunk/cluster-tools/setup/env-vars.sh.tmpl
URL:
http://svn.apache.org/viewvc/oodt/trunk/cluster-tools/setup/env-vars.sh.tmpl?rev=1644505&r1=1644504&r2=1644505&view=diff
==============================================================================
--- oodt/trunk/cluster-tools/setup/env-vars.sh.tmpl (original)
+++ oodt/trunk/cluster-tools/setup/env-vars.sh.tmpl Wed Dec 10 18:48:25 2014
@@ -28,9 +28,20 @@
# TMP_DIR - Temporary directory used as scratch space.
# RUN_DIR - Directory that will hold run-time artifacts from software.
#****************************
+export RUN_DIR=/your/runtime/dir/goes/here
export INSTALL_DIR=/your/install/dir/goes/here
export TMP_DIR=/your/tmp/dir/goes/here
-export RUN_DIR=/your/runtime/dir/goes/here
+
+#****************************
+# Set SCREEN if use of GNU Screen is desired
+#****************************
+#export SCREEN="soodt-screen"
+
+#****************************
+# Set Ports
+#****************************
+export HADOOP_NAMENODE_PORT=8090
+export MESOS_MASTER_PORT=5050
#****************************
# Set these versions of software
@@ -46,12 +57,7 @@ export CLUSTER_TOOLS_VERSION=trunk
#****************************
# Set JAVA_HOME (will use from environment if set)
#****************************
-export JAVA_HOME=${JAVA_HOME:-/usr/java/jdk1.7.0_45/}
-
-#****************************
-# Set SCREEN if use of GNU Screen is desired
-#****************************
-#export SCREEN="soodt-screen"
+export JAVA_HOME=${JAVA_HOME:-/path/to/java/home/}
###
#Beyond This Line: Advanced Users Only
@@ -64,6 +70,7 @@ export ENV_VARS="${INSTALL_DIR}/cluster-
if [ -f ${HOSTS_FILE} ]
then
export MESOS_HOST="$(cat ${HOSTS_FILE} | grep -v "^#" | head -1)"
+ export HADOOP_NAMENODE="${MESOS_HOST}"
fi
export RESOURCEMGR_HOST=${MESOS_HOST}
# HADOOP envs
@@ -78,13 +85,25 @@ fi
# Mesos variables
export MESOS_BUILD="${INSTALL_DIR}/mesos-${APACHE_MESOS_VERSION}/build/"
+export MESOS_HOME="${MESOS_BUILD}"
export MESOS_LOG_DIR="${RUN_DIR}/log/mesos/"
export MESOS_WORK_DIR="${RUN_DIR}/work/mesos/"
+export MESOS_NATIVE_LIBRARY="${MESOS_BUILD}/src/.libs/libmesos.so"
# Get mesos master ip
if [ -n "${MESOS_HOST}" ]
then
export MESOS_MASTER_IP="$(host ${MESOS_HOST} | awk '{print $NF}')"
fi
+# For Spark
+export MASTER="mesos://${MESOS_MASTER_IP}:${MESOS_MASTER_PORT}"
+export SPARK_HOME="${INSTALL_DIR}/spark-${SPARK_VERSION}-bin-hadoop2.4"
+for lib in "${SPARK_HOME}/python/" "${SPARK_HOME}/python/build/"
+do
+ if [[ ${PYTHONPATH} != *${lib}* ]]
+ then
+ export PYTHONPATH=${PYTHONPATH}:${lib}
+ fi
+done
# Resource manager
export RESOURCE_HOST="${RESOURCEMGR_HOST}"
export RESOURCE_HOME="${INSTALL_DIR}/oodt/resource"
Added: oodt/trunk/cluster-tools/setup/hdfs-config/core-site.xml
URL:
http://svn.apache.org/viewvc/oodt/trunk/cluster-tools/setup/hdfs-config/core-site.xml?rev=1644505&view=auto
==============================================================================
--- oodt/trunk/cluster-tools/setup/hdfs-config/core-site.xml (added)
+++ oodt/trunk/cluster-tools/setup/hdfs-config/core-site.xml Wed Dec 10
18:48:25 2014
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://[HDFS_HOST]:[HDFS_PORT]</value>
+ <description>Filesystem name node setup</description>
+ </property>
+</configuration>
Added: oodt/trunk/cluster-tools/setup/hdfs-config/hdfs-site.xml
URL:
http://svn.apache.org/viewvc/oodt/trunk/cluster-tools/setup/hdfs-config/hdfs-site.xml?rev=1644505&view=auto
==============================================================================
--- oodt/trunk/cluster-tools/setup/hdfs-config/hdfs-site.xml (added)
+++ oodt/trunk/cluster-tools/setup/hdfs-config/hdfs-site.xml Wed Dec 10
18:48:25 2014
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file://[INSTALL_DIR]/hadoop/name-node/</value>
+ <description>Filesystem name node logging dir</description>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file://[INSTALL_DIR]/hadoop/data-node/</value>
+ <description>Filesystem data node setup</description>
+ </property>
+
+</configuration>
Modified: oodt/trunk/cluster-tools/setup/install.sh
URL:
http://svn.apache.org/viewvc/oodt/trunk/cluster-tools/setup/install.sh?rev=1644505&r1=1644504&r2=1644505&view=diff
==============================================================================
--- oodt/trunk/cluster-tools/setup/install.sh (original)
+++ oodt/trunk/cluster-tools/setup/install.sh Wed Dec 10 18:48:25 2014
@@ -27,9 +27,13 @@ then
exit 1
fi
. ${ENVS}
+#Setup hosts from bootstrapped host file
+MESOS_HOST="$(cat ${HOSTS} | grep -v "^#" | head -1)"
+HADOOP_NAMENODE="${MESOS_HOST}"
#Tell us what ya going to do
echo "Installing the BDAS components:"
+echo " ----------- Software Versions -----------"
echo " Mesos version: ${APACHE_MESOS_VERSION:-ERROR: Version not set}"
echo " Scala version: ${SCALA_VERSION:-ERROR: Version not set}"
echo " Kafka version: ${KAFKA_VERSION:-ERROR: Version not set}"
@@ -37,17 +41,34 @@ echo " Spark version: ${SPARK_VERSION
echo " Tachyon version: ${TACHYON_VERSION:-ERROR: Version not set}"
echo " Hadoop version: ${HADOOP_VERSION:-ERROR: Version not set}"
echo " Cluster Tools version: ${CLUSTER_TOOLS_VERSION:-ERROR: Version not
set}"
+echo " --------- Environment Variables ---------"
+echo " Hosts file install: ${HOSTS_FILE:-ERROR: No hosts file install
location set}"
+echo " Environment install: ${ENV_VARS:-ERROR: No environment variables file
install location set}"
+echo " Hadoop namenode: ${HADOOP_NAMENODE:-ERROR: No Hadoop namenode
set}"
+echo " Hadoop namenode port: ${HADOOP_NAMENODE_PORT:-ERROR: No Hadoop
namenode port set}"
+echo " ------------ Support Software -----------"
+echo " Maven home: ${M2_HOME:-ERROR: No maven home set}"
+echo "    Java home:            ${JAVA_HOME:-ERROR: No java home set}"
+echo " ---------- Support Directories ----------"
echo " Temporary directory: ${TMP_DIR:-ERROR: Temp dir not set}"
-echo " Running Directory: ${RUN_DIR:-ERROR: Running dir not set}"
-echo " -----------------------------------------"
+echo " Running directory: ${RUN_DIR:-ERROR: Running dir not set}"
+echo " ----------- Install Directory -----------"
echo " Install directory: ${INSTALL_DIR:-ERROR: Install dir not set}"
+#Checking installed software
+if [ -z ${M2_HOME} ] || [ -z ${JAVA_HOME} ] || [ ! -f ${M2_HOME}/bin/mvn ] || \
+ [ ! -f ${JAVA_HOME}/bin/java ]
+then
+ echo "ERROR: Needed software not found."
+ exit 1
+fi
#Checking versions
if [ -z ${APACHE_MESOS_VERSION} ] || [ -z ${SCALA_VERSION} ] || [ -z
${KAFKA_VERSION} ] || \
[ -z ${SPARK_VERSION} ] || [ -z ${TACHYON_VERSION} ] || [ -z
${HADOOP_VERSION} ] || \
- [ -z ${CLUSTER_TOOLS_VERSION} ] || [ -z ${INSTALL_DIR} ] || [ -z ${TMP_DIR}
]
+ [ -z ${CLUSTER_TOOLS_VERSION} ] || [ -z ${INSTALL_DIR} ] || [ -z ${TMP_DIR}
] || \
+ [ -z ${HADOOP_NAMENODE} ] || [ -z ${HADOOP_NAMENODE_PORT} ] || [ -z
${ENV_VARS} ] || [ -z ${HOSTS_FILE} ]
then
- echo "ERROR: Needed variables not set. Did you set the environment files?"
+ echo "ERROR: Needed variables not set. Did you source the environment
files?"
exit 1
fi
#Check directories exit
@@ -142,11 +163,10 @@ then
echo "Cluster tools already installed"
else
echo "Exporting OODT-cluster tools" | tee -a ${INSTALL_LOG}
- svn export
https://svn.apache.org/repos/asf/oodt/${CLUSTER_TOOLS_VERSION}/cluster-tools/
${INSTALL_DIR} \
+ svn export
https://svn.apache.org/repos/asf/oodt/${CLUSTER_TOOLS_VERSION}/cluster-tools/
${INSTALL_DIR}/cluster-tools/ \
||{ \
echo "WARNING: Failed to export cluster-tools:
${CLUSTER_TOOLS_VERSION} Install manually." | tee -a ${INSTALL_LOG};\
- continue;\
- }
+ }
cp ${ENVS} ${ENV_VARS}
cp ${HOSTS} ${HOSTS_FILE}
fi
@@ -163,4 +183,15 @@ else
make &>> ${INSTALL_LOG}
fi
+#Hadoop namenode and configuration
+echo "Replacing host and port information in Hadoop configuration" | tee -a
${INSTALL_LOG}
+sed -i -e "s|\[INSTALL_DIR\]|${INSTALL_DIR}|g"
${INSTALL_DIR}/cluster-tools/setup/hdfs-config/*.xml
+sed -i -e "s/\[HDFS_HOST\]/${HADOOP_NAMENODE}/g"
${INSTALL_DIR}/cluster-tools/setup/hdfs-config/*.xml
+sed -i -e "s/\[HDFS_PORT\]/${HADOOP_NAMENODE_PORT}/g"
${INSTALL_DIR}/cluster-tools/setup/hdfs-config/*.xml
+tail -n +2 ${HOSTS} > ${INSTALL_DIR}/cluster-tools/setup/hdfs-config/slaves
+echo "Moving ${INSTALL_DIR}/cluster-tools/setup/hdfs-config/ to
${HADOOP_HOME}/etc/hadoop/" | tee -a ${INSTALL_LOG}
+mv --backup=numbered --suffix=.bak
${INSTALL_DIR}/cluster-tools/setup/hdfs-config/* ${HADOOP_HOME}/etc/hadoop
+echo "Formatting HDFS namenode" | tee -a ${INSTALL_LOG}
+${HADOOP_HOME}/bin/hdfs namenode -format
+
echo "All done at $(date +"%Y-%m-%dT%H:%M:%S")" | tee -a ${INSTALL_LOG}