[[TRAFODION 1927]] [[TRAFODION 1692]] [[TRAFODION 1572]]

Install on multi-cluster (cloudera setup) --disabled

Vanilla Apache HBase support

Copy error in traf_*mods


Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: 
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/17cef884
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/17cef884
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/17cef884

Branch: refs/heads/master
Commit: 17cef884982714d6f9a319f16179c777454b49a5
Parents: ac4fa8f
Author: Amanda Moran <[email protected]>
Authored: Tue Apr 19 00:19:14 2016 +0000
Committer: Amanda Moran <[email protected]>
Committed: Tue Apr 19 00:19:14 2016 +0000

----------------------------------------------------------------------
 core/sqf/conf/install_features             |   1 +
 core/sqf/sqenvcom.sh                       |  23 +-
 install/installer/traf_add_sudoAccess      |  10 +-
 install/installer/traf_apache_mods         | 267 +++++++++++++++++++++
 install/installer/traf_cloudera_mods98     |   6 +-
 install/installer/traf_config_check        |  92 ++++++-
 install/installer/traf_config_setup        | 307 +++++++++++++++++++++---
 install/installer/traf_getHadoopNodes      |   1 -
 install/installer/traf_getMultiHadoopNodes | 145 +++++++++++
 install/installer/trafodion_install        |  23 +-
 10 files changed, 797 insertions(+), 78 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/core/sqf/conf/install_features
----------------------------------------------------------------------
diff --git a/core/sqf/conf/install_features b/core/sqf/conf/install_features
index 0fd7e98..f65cdc7 100644
--- a/core/sqf/conf/install_features
+++ b/core/sqf/conf/install_features
@@ -64,3 +64,4 @@
 export CDH_5_3_HDP_2_2_SUPPORT="N"
 export HDP_2_3_SUPPORT="Y"
 export CDH_5_4_SUPPORT="Y"
+export APACHE_1_0_X_SUPPORT="Y"

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/core/sqf/sqenvcom.sh
----------------------------------------------------------------------
diff --git a/core/sqf/sqenvcom.sh b/core/sqf/sqenvcom.sh
index ecc8a9a..7e864f7 100644
--- a/core/sqf/sqenvcom.sh
+++ b/core/sqf/sqenvcom.sh
@@ -506,23 +506,6 @@ EOF
   APACHE_HIVE_HOME=$HIVE_HOME
   export HIVE_CNF_DIR=$HIVE_HOME/conf
 
-  for cp in `echo $CLASSPATH | sed 's/:/ /g'`
-  do
-    if [ -f $cp/core-site.xml ]; then
-      export HADOOP_CNF_DIR=$cp
-      APACHE_HADOOP_HOME=$(dirname $(dirname $cp))
-    fi
-    if [ -f $cp/hbase-site.xml ]; then
-      [[ $SQ_VERBOSE == 1 ]] && echo "Found $cp/hbase-site.xml in CLASSPATH, 
this is vanilla Apache"
-      export HBASE_CNF_DIR=$cp
-      APACHE_HBASE_HOME=`dirname $cp`
-    fi
-    if [ -f $cp/hive-site.xml ]; then
-      export HIVE_CNF_DIR=$cp
-      APACHE_HIVE_HOME=`dirname $cp`
-    fi
-  done
-
   # sometimes, conf file and lib files don't have the same parent,
   # try to handle some common cases, where the libs are under /usr/lib
   if [ ! -d $APACHE_HADOOP_HOME/lib/ -a -d /usr/lib/hadoop ]; then
@@ -578,8 +561,10 @@ EOF
     export SQL_JAR=trafodion-sql-${HBVER}-${TRAFODION_VER}.jar
   else
     # print usage information, not enough information about Hadoop/HBase
-    vanilla_apache_usage
-    NEEDS_HADOOP_INSTALL=1
+    if [[ -z $HADOOP_TYPE ]]; then
+       vanilla_apache_usage
+       NEEDS_HADOOP_INSTALL=1
+    fi
   fi
 
 fi

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/install/installer/traf_add_sudoAccess
----------------------------------------------------------------------
diff --git a/install/installer/traf_add_sudoAccess 
b/install/installer/traf_add_sudoAccess
index 23fd67f..d0e4a45 100755
--- a/install/installer/traf_add_sudoAccess
+++ b/install/installer/traf_add_sudoAccess
@@ -40,8 +40,8 @@ function backupRestore {
 
 source $TRAF_CONFIG
 
-echo "## Allow $BACKUP_USER id to run commands needed for backup and restore" 
> $BACKUP_USER
-echo "%$BACKUP_USER ALL =(hbase) NOPASSWD: /usr/bin/hbase" >> $BACKUP_USER
+sudo echo "## Allow $BACKUP_USER id to run commands needed for backup and 
restore" > $BACKUP_USER
+sudo echo "%$BACKUP_USER ALL =(hbase) NOPASSWD: /usr/bin/hbase" >> $BACKUP_USER
 
 }
 
@@ -61,9 +61,9 @@ fi
 function copyFile {
 
 if [[ $all_node_count -gt "1" ]]; then
-   $TRAF_PDCP $BACKUP_USER $HOME
-   $TRAF_PDSH sudo cp $HOME/$BACKUP_USER /etc/sudoers.d/
-   $TRAF_PDSH sudo rm -f $HOME/$BACKUP_USER
+   $TRAF_PDCP $BACKUP_USER $HOME/temp_traf
+   $TRAF_PDSH sudo cp $HOME/temp_traf /etc/sudoers.d/$BACKUP_USER
+   $TRAF_PDSH sudo rm -f $HOME/temp_traf
 else
    sudo cp $BACKUP_USER /etc/sudoers.d
 fi

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/install/installer/traf_apache_mods
----------------------------------------------------------------------
diff --git a/install/installer/traf_apache_mods 
b/install/installer/traf_apache_mods
new file mode 100644
index 0000000..2b27a0d
--- /dev/null
+++ b/install/installer/traf_apache_mods
@@ -0,0 +1,267 @@
+#!/bin/bash
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+#
+# This script will configure HBase with HBase-trx
+# and co-processors needed for Trafodion.  It uses
+# Ambari's configs.sh script to do this.
+#
+# NOTE: Only for Ambari installations
+
+TRAF_CONFIG=/etc/trafodion/trafodion_config
+source $TRAF_CONFIG
+
+export PDSH="pdsh -R exec"
+export PDSH_SSH_CMD="ssh -q -n %h"
+export PDCP="pdcp -R ssh"
+
+export PDSH_HADOOP_NODES="$PDSH $MY_HBASE_NODES $PDSH_SSH_CMD"
+export PDCP_HADOOP_NODES="$PDCP $MY_HBASE_NODES"
+#=====================================
+# copy Trafodion hbase trx jar to /usr/lib/hbase/lib
+
+cd $UNTAR_DIR
+
+HDFS_NODE=$(echo $HDFS_NODES | head -n1 | awk '{print $1;}')
+HBASE_NODE=$(echo $HBASE_NODES | head -n1 | awk '{print $1;}')
+echo "export HDFS_NODE=\"$HDFS_NODE\"" >> $TRAF_CONFIG
+echo "export HBASE_NODE=\"$HBASE_NODE\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+source $TRAF_CONFIG
+
+
+hbase_trx_jar="hbase-trx-apache1_0_2-2.0.0.jar"
+
+traf_util_jar="trafodion-utility-*.jar"
+
+
+# The permissions the Trafodion build process creates on the hbase-trx jar
+# files does not work well with the installation process so we change them
+sudo chmod -R 777 $UNTAR_DIR/export/lib
+
+if [ ! -f $UNTAR_DIR/export/lib/$hbase_trx_jar ]; then
+    echo "***ERROR: unable to find $UNTAR_DIR/export/lib/$hbase_trx_jar"
+    exit -1
+fi
+
+# if more than one node then copy to all nodes
+echo "***INFO: copying $hbase_trx_jar to all nodes"
+if [ $node_count -ne 1 ]; then
+    $PDSH_HADOOP_NODES sudo rm -rf  $HBASE_HOME/lib/hbase-trx* 2>/dev/null
+    $TRAF_PDSH mkdir -p $LOCAL_WORKDIR 2>/dev/null
+    $PDSH_HADOOP_NODES mkdir -p $LOCAL_WORKDIR 2>/dev/null
+    cp $UNTAR_DIR/export/lib/$hbase_trx_jar $LOCAL_WORKDIR
+    cp $UNTAR_DIR/export/lib/$traf_util_jar $LOCAL_WORKDIR
+    $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$hbase_trx_jar $LOCAL_WORKDIR
+    $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$traf_util_jar $LOCAL_WORKDIR
+    $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$traf_util_jar $HBASE_HOME/lib
+    $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$hbase_trx_jar $HBASE_HOME/lib
+    $PDSH_HADOOP_NODES sudo chmod 644 $HBASE_HOME/lib/$hbase_trx_jar
+    $PDSH_HADOOP_NODES sudo chmod 644 $HBASE_HOME/lib/$traf_util_jar
+
+    $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$hbase_trx_jar 2>/dev/null
+    $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$traf_util_jar 2>/dev/null
+else
+    for node in $HBASE_NODES
+    do 
+    ssh -q -n $node sudo rm -rf $HBASE_HOME/lib/hbase-trx* 2>/dev/null
+    ssh -q -n $node sudo mkdir -p $TRAF_WORKDIR 2>/dev/null
+    ssh -q -n $node sudo chmod 777 $TRAF_WORKDIR
+    scp -q $UNTAR_DIR/export/lib/$hbase_trx_jar $(whoami)@$node:$TRAF_WORKDIR
+    scp -q $UNTAR_DIR/export/lib/$traf_util_jar $(whoami)@$node:$TRAF_WORKDIR
+    ssh -q -n $node sudo cp $TRAF_WORKDIR/$hbase_trx_jar $HBASE_HOME/lib
+    ssh -q -n $node sudo cp $TRAF_WORKDIR/$traf_util_jar $HBASE_HOME/lib
+    ssh -q -n $node sudo chmod 644 $HADOOP_PATH/$hbase_trx_jar
+    ssh -q -n $node sudo chmod 644 $HADOOP_PATH/$traf_util_jar
+    done
+fi
+
+#=======================================
+#Check that HBase-trx copied to all nodes
+
+for node in $HBASE_NODES
+do
+   copiedOver=$(ssh -q -n $node sudo ls $HBASE_HOME/lib/hbase-trx* | wc -l)
+   if [[ $copiedOver -ne "1" ]]; then
+      echo "***ERROR: $hbase_trx_jar was not copied on $node"
+      echo "***ERROR: Please investigate why this happened"
+      echo "***ERROR: Trafodion can not start without this. EXITING..."
+      exit -1
+   fi
+done
+
+echo "***INFO: $hbase_trx_jar copied correctly! Huzzah."
+
+
+
+#Copy hbase-site.xml file
+ssh -q -n $HBASE_NODE sudo cp $HBASE_HOME/conf/hbase-site.xml $HOME
+ssh -q -n $HBASE_NODE sudo chown $(whoami).$(whoami) $HOME/hbase-site.xml
+ssh -q -n $HBASE_NODE sudo chmod 777 $HOME/hbase-site.xml
+
+scp -q $(whoami)@$HBASE_NODE:$HOME/hbase-site.xml $HOME
+if [[ $? -gt 1 ]]; then
+   echo "***ERROR: Unable to find $HBASE_HOME/conf/hbase-site.xml file on 
$HBASE_NODE or unable to copy."
+   exit -1
+fi
+sudo cp $HOME/hbase-site.xml $TRAF_WORKDIR
+sudo chown trafodion.trafodion $TRAF_WORKDIR/hbase-site.xml
+
+#=====================================
+# create new directories for bulkload and lobs if not already there
+rm $LOCAL_WORKDIR/traf_temp_output 2>/dev/null
+
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir /hbase-staging" 2> $HOME/traf_temp_output'
+if [ $? != 0 ]; then
+   # ok if directory already exists
+   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" 
$HOME/traf_temp_output | wc -l')
+   if [ $dir_exists -eq 0 ]; then
+      echo "***ERROR: 'hdfs dfs -mkdir /hbase-staging' command failed"
+      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
+      exit -1
+   fi
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown -R' "$HBASE_USER"':'"$HBASE_GROUP" 
'/hbase-staging"'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir /bulkload" 2> $HOME/traf_temp_output'
+if [ $? != 0 ]; then
+   # ok if directory already exists
+   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" 
$HOME/traf_temp_output | wc -l')
+   if [ $dir_exists -eq 0 ]; then
+      echo "***ERROR: 'hdfs dfs -mkdir /bulkload' command failed"
+      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
+      exit -1
+   fi
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command " ' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown -R' "$TRAF_USER"':trafodion /bulkload"'
+
+# Create lobs directory
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir /lobs" 2> $HOME/traf_temp_output'
+if [ $? != 0 ]; then
+   # ok if directory already exists
+   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" 
$HOME/traf_temp_output | wc -l')
+   if [ $dir_exists -eq 0 ]; then
+      echo "***ERROR: 'hdfs dfs -mkdir /lobs' command failed"
+      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
+      exit -1
+   fi
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown -R' "$TRAF_USER"':trafodion /lobs"'
+
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir /trafodion_backups" 2> 
$HOME/traf_temp_output'
+if [ $? != 0 ]; then
+   # ok if directory already exists
+   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" 
$HOME/traf_temp_output | wc -l')
+   if [ $dir_exists -eq 0 ]; then
+      echo "***ERROR: 'hdfs dfs -mkdir /trafodion_backups' command failed"
+      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
+      exit -1
+   fi
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown -R' "$TRAF_USER"':trafodion 
/trafodion_backups"'
+
+
+ssh -q -n $HDFS_NODE 'rm -rf $HOME/traf_temp_output'
+#=====================================
+#Restart Hadoop
+
+echo "****INFO: Stopping HDFS, HBASE, and Zookeeper" 
+
+
+
+sudo su $HBASE_USER $HBASE_HOME/bin/stop-hbase.sh
+
+sudo su $ZOO_USER $ZOO_HOME/bin/zkServer.sh stop
+
+sudo su $HDFS_USER $HADOOP_PREFIX/sbin/stop-dfs.sh
+
+
+echo 
"***IMPORTANT***********************************************************************"
+echo 
+echo "***INFO: Settings below need to be set for Trafodion to work on Apache 
Hadoop/HBase"
+echo "***INFO: In hdfs-site.xml set dfs.namenode.acls.enabled to true."
+echo "***INFO: In zoo.cfg set maxClientCnxns to 0"
+echo "***INFO: Create $ZOO_HOME/conf/zookeeper-env.sh and set JAVA_HOME"
+echo "***INFO: In hbase-site.xml set hbase.coprocessor.region.classes to 
org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver, 
org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint, 
org.apache.hadoop.hbase.coprocessor.AggregateImplementation"
+echo "***INFO: In hbase-site.xml set hbase.hregion.impl to 
org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion"
+echo 
+echo 
"***IMPORTANT***********************************************************************"
+
+sleep 15
+
+
+
+sudo su $ZOO_USER $ZOO_HOME/bin/zkServer.sh start
+
+sudo su $HDFS_USER $HADOOP_PREFIX/sbin/start-dfs.sh
+
+ssh -q -n $HDFS_NODE 'sudo su ' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfsadmin -safemode wait"'
+
+sudo su $HBASE_USER $HBASE_HOME/bin/start-hbase.sh
+
+#=====================================
+# NOTE: These command must be done AFTER acls are 
+#       enabled and HDFS has been restarted
+echo "***INFO: Setting HDFS ACLs for snapshot scan support"
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir -p /apps/hbase/data/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -mkdir -p /apps/hbase/data/archive) command 
failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown ' "$HBASE_USER"':'"$HDFS_USER" 
'/apps/hbase/data/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive) 
command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -setfacl -R -m user:'"$TRAF_USER"':rwx 
/apps/hbase/data/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx 
/apps/hbase/data/archive) command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -setfacl -R -m default:user:'"$TRAF_USER"':rwx 
/apps/hbase/data/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx 
/apps/hbase/data/archive) command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -setfacl -R -m mask::rwx 
/apps/hbase/data/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx 
/apps/hbase/data/archive) command failed"
+   exit -1
+fi
+
+
+MODS_COMPLETE="Y"
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/MODS_COMPLETE\=/d' $TRAF_CONFIG
+echo "export MODS_COMPLETE=\"$MODS_COMPLETE\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+source $TRAF_CONFIG
+
+TRAF_CONFIG_FILE="trafodion_config"
+TRAF_CONFIG_DIR="/etc/trafodion"
+
+if [ $node_count -ne 1 ]; then
+   cp $TRAF_CONFIG $LOCAL_WORKDIR
+   $TRAF_PDCP $LOCAL_WORKDIR/$TRAF_CONFIG_FILE $HOME
+   $TRAF_PDSH sudo mkdir -p $TRAF_CONFIG_DIR
+   $TRAF_PDSH sudo cp $HOME/$TRAF_CONFIG_FILE $TRAF_CONFIG_DIR
+   $TRAF_PDSH sudo chmod 777 $TRAF_CONFIG
+fi
+

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/install/installer/traf_cloudera_mods98
----------------------------------------------------------------------
diff --git a/install/installer/traf_cloudera_mods98 
b/install/installer/traf_cloudera_mods98
index 39c2766..5559f49 100755
--- a/install/installer/traf_cloudera_mods98
+++ b/install/installer/traf_cloudera_mods98
@@ -165,7 +165,7 @@ if [ $? != 0 ]; then
       exit -1
    fi
 fi
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -chown -R' "$TRAF_USER"':trafodion /bulkload"'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$TRAF_USER"':trafodion /bulkload"'
 
 # Create lobs directory
 ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command 
"'"$HADOOP_BIN_PATH"'/hadoop fs -mkdir /lobs" 2> $HOME/traf_temp_output'
@@ -221,7 +221,7 @@ curl -k -X PUT -H 'Content-Type:application/json' -u 
$ADMIN:$PASSWORD  --data \
              "value":"true"
              } ]
 }' \
-$URL/api/v1/clusters/$CLUSTER_NAME/services/hdfs/config > 
$LOCAL_WORKDIR/traf_hdfs_config_temp
+$URL/api/v1/clusters/$CLUSTER_NAME/services/$HDFS/config > 
$LOCAL_WORKDIR/traf_hdfs_config_temp
 
 if [ $? != 0 ]; then
     echo "***ERROR: Unable to modify HDFS configuration through Cloudera's 
REST API."
@@ -310,7 +310,7 @@ curl -k -X PUT -H 'Content-Type:application/json' -u 
$ADMIN:$PASSWORD  --data \
         } ]
 
 }' \
-$URL/api/v1/clusters/$CLUSTER_NAME/services/zookeeper/config > 
$LOCAL_WORKDIR/traf_zookeeper_config_temp
+$URL/api/v1/clusters/$CLUSTER_NAME/services/$ZOOKEEPER/config > 
$LOCAL_WORKDIR/traf_zookeeper_config_temp
 
 # in most cases curl does not return an error
 # so curl's actual output needs to be checked, too

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/install/installer/traf_config_check
----------------------------------------------------------------------
diff --git a/install/installer/traf_config_check 
b/install/installer/traf_config_check
index a1ed2e9..8c45e83 100755
--- a/install/installer/traf_config_check
+++ b/install/installer/traf_config_check
@@ -545,7 +545,7 @@ else
 
    hadoopVersion=$(curl -k -su $ADMIN:$PASSWORD $URL/api/v1/clusters | grep 
version | grep -c CDH)
 
-   if [[ $hadoopVersion -ne "1" ]]; then
+   if [[ $hadoopVersion -lt "1" ]]; then
       hadoopVersion=$(curl -k -su $ADMIN:$PASSWORD $URL/api/v1/clusters | grep 
version | grep -c HDP)
       if [[ $hadoopVersion -ne "1" ]]; then
          errorFound=1
@@ -562,11 +562,14 @@ else
    sudo chmod 777 $TRAF_CONFIG
    sed -i '/HADOOP_TYPE\=/d' $TRAF_CONFIG
    echo "export HADOOP_TYPE=\"$HADOOP_TYPE\"" >> $TRAF_CONFIG
+   echo "export MULTI_CLUSTER=\"N\"" >> $TRAF_CONFIG
    sudo chmod 777 $TRAF_CONFIG
    source $TRAF_CONFIG
 
-   temp=$(curl -k -su $ADMIN:$PASSWORD $URL/api/v1/clusters |grep name | sed 
-e 's@[,]@@'g | sed 's/^[^\:]* ://')
-   CLUSTER_NAME=$(echo $temp | sed -e 's/^"//' -e 's/"$//')
+    if [[ $MULTI_CLUSTER == "N" ]]; then
+      temp=$(curl -k -su $ADMIN:$PASSWORD $URL/api/v1/clusters |grep name | 
sed -e 's@[,]@@'g | sed 's/^[^\:]* ://')
+      CLUSTER_NAME=$(echo $temp | sed -e 's/^"//' -e 's/"$//')
+   fi
    CLUSTER_NAME=${CLUSTER_NAME// /%20}
 
    if [ -z $CLUSTER_NAME ]; then
@@ -595,10 +598,40 @@ else
 fi
 }
 
+function setPath {
+
+export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
+
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/PATH\=/d' $TRAF_CONFIG
+echo "export PATH=\"$PATH\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+source $TRAF_CONFIG
+
+}
+
+function setHBaseDistro {
+
+export HBASE_DISTRO="APACHE" 
+
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/HBASE_DISTRO\=/d' $TRAF_CONFIG
+echo "export HBASE_DISTRO=\"$HBASE_DISTRO\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+source $TRAF_CONFIG
+
+}
+
 function getHadoopNodes {
 
 echo "***INFO: Getting list of all $HADOOP_TYPE nodes"
-$LOCAL_WORKDIR/traf_getHadoopNodes
+
+if [[ $MULTI_CLUSTER == "N" ]] && [[ $HADOOP_TYPE != "apache" ]]; then
+   $LOCAL_WORKDIR/traf_getHadoopNodes
+else
+   $LOCAL_WORKDIR/traf_getMultiHadoopNodes
+fi
+
 
 if [ $? -ne "0" ]; then
    errorFound=1
@@ -652,15 +685,16 @@ function checkRoleGroups {
 
 }
 
+
 function checkClouderaVersion {
 
 if [[ $CDH_5_3_HDP_2_2_SUPPORT == "N" ]]; then
    #Check that Cloudera 5.2 or 5.3 are not installed.
    if [[ "$CDH_5_4_SUPPORT" == "Y" ]] || [[ "$CDH_5_5_SUPPORT" == "Y" ]]; then
-      nameOfVersion=$(ssh -q -n $node grep "Version" $HOME/hbaseVersion.txt | 
sed 's/,.*//' | sed 's/.*\-//' | grep cdh5.[4-5].*)
+      nameOfVersion=$(ssh -q -n $node grep "Version" $HOME/hbaseVersion.txt | 
sed 's/,.*//' | sed 's/.*\-//' | grep cdh5.[4-6].*)
       #Check that Cloudera 5.[n>4].* is not installed.
       if [[ -z $nameOfVersion ]]; then
-         versionInstalled=$(ssh -q -n $node grep "Version" 
$HOME/hbaseVersion.txt | sed 's/,.*//' | sed 's/.*\-//' | grep cdh[6-9].[0-9].* 
| wc -l)
+         versionInstalled=$(ssh -q -n $node grep "Version" 
$HOME/hbaseVersion.txt | sed 's/,.*//' | sed 's/.*\-//' | grep cdh[5-9].[7-9].* 
| wc -l)
          if [[ $versionInstalled -gt "0" ]]; then
             errorFound=1
             echo "HADOOP VERSION" >> $ERROR_LOG
@@ -830,6 +864,25 @@ else
    fi
 fi
 
+
+if [[ -z "$DCS_MASTER_PORT" ]]; then
+   sudo chmod 777 $TRAF_CONFIG
+   echo "export DCS_MASTER_PORT=\"23400\"" >> $TRAF_CONFIG
+   sudo chmod 777 $TRAF_CONFIG
+fi
+
+if [[ -z "$DCS_MASTER_HOST" ]]; then
+   if [[ "$ENABLE_HA" == "true" ]]; then
+      sudo chmod 777 $TRAF_CONFIG
+      echo "export DCS_MASTER_HOST=\"$FLOATING_IP\"" >> $TRAF_CONFIG
+      sudo chmod 777 $TRAF_CONFIG
+   else
+      sudo chmod 777 $TRAF_CONFIG
+      echo "export DCS_MASTER_HOST=\"$DCS_PRIMARY_MASTER_NODE\"" >> 
$TRAF_CONFIG
+      sudo chmod 777 $TRAF_CONFIG
+   fi
+fi
+
 }
 
 function checkLDAP {
@@ -895,16 +948,27 @@ if [[ ! -z $install_features_path ]]; then
       echo "export HDP_2_3_SUPPORT=\"$HDP_2_3_SUPPORT\"" >> $TRAF_CONFIG
       sudo chmod 777 $TRAF_CONFIG
    fi
+   if [[ -z $APACHE_1.0_X_SUPPORT ]]; then
+      sudo chmod 777 $TRAF_CONFIG
+      echo "export APACHE_1_0_X_SUPPORT=\"N\"" >> $TRAF_CONFIG
+      sudo chmod 777 $TRAF_CONFIG
+   else
+      sudo chmod 777 $TRAF_CONFIG
+      echo "export APACHE_1_0_X_SUPPORT=\"Y\"" >> $TRAF_CONFIG
+      sudo chmod 777 $TRAF_CONFIG      
+   fi
 else
    CDH_5_3_HDP_2_2_SUPPORT="N"
    CDH_5_4_SUPPORT="N"
    CDH_5_5_SUPPORT="N"
    HDP_2_3_SUPPORT="N"
+   APACHE_1_0_X_SUPPORT="N"
    sudo chmod 777 $TRAF_CONFIG
    echo "export CDH_5_3_HDP_2_2_SUPPORT=\"$CDH_5_3_HDP_2_2_SUPPORT\"" >> 
$TRAF_CONFIG
    echo "export CDH_5_4_SUPPORT=\"$CDH_5_4_SUPPORT\"" >> $TRAF_CONFIG
    echo "export CDH_5_5_SUPPORT=\"$CDH_5_5_SUPPORT\"" >> $TRAF_CONFIG
    echo "export HDP_2_3_SUPPORT=\"$HDP_2_3_SUPPORT\"" >> $TRAF_CONFIG
+   echo "export APACHE_1_0_X_SUPPORT=\"$APACHE_1_0_X_SUPPORT\"" >> $TRAF_CONFIG
    sudo chmod 777 $TRAF_CONFIG
 fi
 
@@ -914,7 +978,7 @@ fi
 #===========================================
 #Main 
 
-echo " ***INFO: Trafodion Configuration File Check"
+echo "***INFO: Trafodion Configuration File Check"
 
 setUpErrorLog
 
@@ -944,11 +1008,19 @@ checkEPEL
 
 checkHadoopSupport
 
-checkBackupUser
+if [[ "$HADOOP_TYPE" != "apache" ]]; then
+   checkBackupUser
+
+   checkHadoopUserPass
 
-checkHadoopUserPass
+   checkHadoopURL
 
-checkHadoopURL
+fi
+
+if [[ "$HADOOP_TYPE" == "apache" ]]; then
+   setPath
+   setHBaseDistro
+fi
 
 getHadoopNodes
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/install/installer/traf_config_setup
----------------------------------------------------------------------
diff --git a/install/installer/traf_config_setup 
b/install/installer/traf_config_setup
index 63704d5..c1a993a 100755
--- a/install/installer/traf_config_setup
+++ b/install/installer/traf_config_setup
@@ -49,9 +49,15 @@ ADMIN="admin"
 PASSWORD="admin"
 URL=""
 HADOOP_NODES=""
+
+HADOOP_PREFIX=""
+HBASE_HOME=""
+HIVE_HOME=""
+
 HDFS_USER="hdfs"
 HBASE_USER="hbase"
 HBASE_GROUP="hbase"
+ZOO_USER="zookeeper"
 
 SQ_ROOT=""
 INIT_TRAFODION="N"
@@ -64,6 +70,8 @@ BACKUP_DCS_NODES=""
 LDAP_SECURITY="N"
 SCANNER_MODE="N"
 DCS_PRIMARY_MASTER_NODE=`hostname -f`
+DCS_MASTER_PORT="23400"
+DCS_MASTER_HOST="$DCS_PRIMARY_MASTER_NODE"
 CLOUD_CONFIG="N"
 CLOUD_TYPE=""
 AWS_CLOUD=""
@@ -407,61 +415,270 @@ else
 fi
 
 #==============================================
-#Hadoop user name
+#Enter Hadoop Type
 
-echo -n "Enter Hadoop admin username, default is [$ADMIN]: "
-read answer
+echo -n "Specify the version of Hadoop installed (1: Cloudera, 2: Hortonworks, 
3: Other): "
+read answer1
+  if [[ -z "$answer1" ]]; then
+     echo "***ERROR: User must specify the version of Hadoop being used."
+     exit -1
+  fi
+  case "$answer1" in
+    1) HADOOP_TYPE="cloudera"
+       echo "export HADOOP_TYPE=\"$HADOOP_TYPE\"" >> $LOCAL_TRAF_CONFIG
+       ;;
+    2) HADOOP_TYPE="hortonworks"
+       echo "export HADOOP_TYPE=\"$HADOOP_TYPE\"" >> $LOCAL_TRAF_CONFIG
+       ;;
+    3) HADOOP_TYPE="apache"
+       echo "export HADOOP_TYPE=\"$HADOOP_TYPE\"" >> $LOCAL_TRAF_CONFIG
+  esac
 
-if [ -z $answer ]; then
-   echo "export ADMIN=\"$ADMIN\"" >> $LOCAL_TRAF_CONFIG
-   username=$ADMIN
-else
-   echo "export ADMIN=\"$answer\"" >> $LOCAL_TRAF_CONFIG
-   username=$answer
-fi
+export $HADOOP_TYPE
 #==============================================
-#Hadoop Password
+if [[ "$HADOOP_TYPE" == "apache" ]]; then
+   echo -n "Enter Hadoop installed full path (example: '/opt/hadoop-2.6.0'), 
default is [$HADOOP_PREFIX]: "
+   read answer
 
-echo -n "Enter Hadoop admin password, default is [$PASSWORD]: "
-read answer
+   if [ -z  $answer ]; then
+      if [[ -z $HADOOP_PREFIX ]]; then
+         echo "***ERROR: Must enter Hadoop installed path"
+         exit -1
+      fi
+   else
+      if [[ -e $answer ]]; then
+         HADOOP_PREFIX=$answer
+      else
+         echo "***ERROR: Hadoop installed path doesn't exist"
+         exit -1
+      fi
+   fi
+   echo "export HADOOP_PREFIX=\"$HADOOP_PREFIX\"" >> $LOCAL_TRAF_CONFIG
+
+   #Hbase path 
+   echo -n "Enter HBase installed full path (example: '/opt/hbase-1.2.0'), 
default is [$HBASE_HOME]: "
+   read answer
+
+   if [ -z  $answer ]; then
+      if [[ -z $HBASE_HOME ]]; then
+         echo "***ERROR: Must enter HBase installed path"
+         exit -1
+      fi
+   else
+      if [[ -e $answer ]]; then
+         HBASE_HOME=$answer
+      else
+         echo "***ERROR: HBase installed path doesn't exist"
+         exit -1
+      fi
+   fi
+   echo "export HBASE_HOME=\"$HBASE_HOME\"" >> $LOCAL_TRAF_CONFIG
+   
+   #Zookeeper path
+   echo -n "Enter Zookeeper installed full path (example: '/opt/zoo'), default 
is [$ZOO_HOME]: "
+   read answer0
+
+   if [ -z  $answer0 ]; then
+      if [[ -z $ZOO_HOME ]]; then
+         echo "***ERROR: Must enter Zookeeper installed path"
+         exit -1
+      fi
+   else
+      if [[ -e $answer0 ]]; then
+         ZOO_HOME=$answer0
+      else
+         echo "***ERROR: Zookeeper installed path doesn't exist"
+         exit -1
+      fi
+   fi
+   echo "export ZOO_HOME=\"$ZOO_HOME\"" >> $LOCAL_TRAF_CONFIG
+
+   #Hive path 
+
+   echo -n "Is Hive Installed (Y/N), default is N: "
+   read answer
+
+   if [[ "${answer}" =~ ^[Yy]$ ]]; then
+
+      echo -n "Enter Hive installed full path, default is [$HIVE_HOME]: "
+      read answer1
+
+      if [ -z  $answer1 ]; then
+         if [[ -z $HIVE_HOME ]]; then
+            echo "***ERROR: Must enter apache Hive installed path"
+            exit -1
+         fi
+      else
+         if [[ -e $answer1 ]]; then
+            HIVE_HOME=$answer1
+         else
+            echo "***ERROR: apache Hive installed path doesn't exist"
+            exit -1
+         fi
+      fi
+   fi
+   echo "export HIVE_HOME=\"$HIVE_HOME\"" >> $LOCAL_TRAF_CONFIG
+
+   echo -n "Enter list of all nodes in this cluster (blank separated), default 
[$HADOOP_NODES]: "
+   read answer2
+   if [[ -z "$answer2" ]]; then
+      if [ -z "$HADOOP_NODES" ]; then
+         echo "***ERROR: All nodes in this cluster must be specified."
+         exit -1
+      fi
+   else
+      HADOOP_NODES="$answer2"
+      echo "export HADOOP_NODES=\"$HADOOP_NODES\"" >> $LOCAL_TRAF_CONFIG
+   fi
+
+   echo -n "Enter list of all HBase nodes in this cluster (blank separated), 
default [$HBASE_NODES]: "
+   read answer3
+   if [[ -z "$answer3" ]]; then
+      if [ -z "$HBASE_NODES" ]; then
+         echo "***ERROR: All nodes in this cluster must be specified."
+         exit -1
+      fi
+   else
+      HBASE_NODES="$answer3"
+      echo "export HBASE_NODES=\"$HBASE_NODES\"" >> $LOCAL_TRAF_CONFIG
+   fi
+
+   echo -n "Enter list of all HDFS nodes in this cluster (blank separated), 
default [$HDFS_NODES]: "
+   read answer4
+   if [[ -z "$answer4" ]]; then
+      if [ -z "$HDFS_NODES" ]; then
+         echo "***ERROR: All nodes in this cluster must be specified."
+         exit -1
+      fi
+   else
+      HDFS_NODES="$answer4"
+      echo "export HDFS_NODES=\"$HDFS_NODES\"" >> $LOCAL_TRAF_CONFIG
+   fi
 
-if [ -z $answer ]; then
-   echo "export PASSWORD=\"$PASSWORD\"" >> $LOCAL_TRAF_CONFIG
-   password=$PASSWORD
-else
-   echo "export PASSWORD=\"$answer\"" >> $LOCAL_TRAF_CONFIG
-   password=$answer
 fi
 
 #==============================================
-#Hadoop URL
+#Hadoop user name
 
-echo -n "Enter full Hadoop external network URL:port (include 'http://' or 
'https://), default is [$URL]: "
-read answer
+if [[ "$HADOOP_TYPE" != "apache" ]]; then
+
+   echo -n "Enter Hadoop admin username, default is [$ADMIN]: "
+   read answer
+
+   if [ -z $answer ]; then
+      echo "export ADMIN=\"$ADMIN\"" >> $LOCAL_TRAF_CONFIG
+      username=$ADMIN
+   else
+      echo "export ADMIN=\"$answer\"" >> $LOCAL_TRAF_CONFIG
+      username=$answer
+   fi
+   #==============================================
+   #Hadoop Password
+
+   echo -n "Enter Hadoop admin password, default is [$PASSWORD]: "
+   read answer
+
+   if [ -z $answer ]; then
+      echo "export PASSWORD=\"$PASSWORD\"" >> $LOCAL_TRAF_CONFIG
+      password=$PASSWORD
+   else
+      echo "export PASSWORD=\"$answer\"" >> $LOCAL_TRAF_CONFIG
+      password=$answer
+   fi
 
-if [ -z  $answer ]; then
-   if [[ -z $URL ]]; then
-      echo "***ERROR: Must enter $HADOOP_TYPE external network URL"
+   #==============================================
+   #Hadoop URL
+
+   echo -n "Enter full Hadoop external network URL:port (include 'http://' or 
'https://), default is [$URL]: "
+   read answer
+
+   if [ -z  $answer ]; then
+      if [[ -z $URL ]]; then
+         echo "***ERROR: Must enter $HADOOP_TYPE external network URL"
+         exit -1
+      fi
+   else
+      URL=$answer
+   fi
+
+   validURL=$(curl -k -s --head $URL | head -n 1 | grep "OK" | wc -l)
+
+   if [[ $validURL -eq "1" ]]; then
+      echo "export URL=\"$URL\"" >> $LOCAL_TRAF_CONFIG
+   else
+      echo "***ERROR: Could not access $URL"
+      echo "***ERROR: Check that URL and port are correct or if $HADOOP_TYPE 
is up"
       exit -1
    fi
-else
-   URL=$answer
 fi
+#==============================================
 
-validURL=$(curl -k -s --head $URL | head -n 1 | grep "OK" | wc -l)
+#echo -n "Is this a multi-cluster setup (Y/N), default is N: "
+#read answer
+
+#if [[ "${answer}" =~ ^[Yy]$ ]]; then
+#   echo "export MULTI_CLUSTER=\"Y\"" >> $LOCAL_TRAF_CONFIG
+   
+#   echo -n "Enter full Hadoop Cluster name, default is [$CLUSTER_NAME]: "
+#   read answer
+
+#   answer=${answer// /%20}
+
+#   if [ -z $answer ]; then
+#      if [[ -z $CLUSTER_NAME ]]; then
+#         echo "***ERROR: Must enter full Hadoop Cluster name"
+#         exit -1
+#      fi
+#   else
+#      CLUSTER_NAME=$answer
+#      echo "export CLUSTER_NAME=\"$CLUSTER_NAME\"" >> $LOCAL_TRAF_CONFIG
+#   fi
+ 
+#   echo -n "Enter list of all nodes in this cluster (blank separated), 
default [$HADOOP_NODES]: "
+#   read answer
+#   if [[ -z "$answer" ]]; then
+#      if [ -z "$HADOOP_NODES" ]; then
+#         echo "***ERROR: All nodes in this cluster must be specified."
+#         exit -1
+#      fi
+#   else
+#      HADOOP_NODES="$answer"
+#      echo "export HADOOP_NODES=\"$HADOOP_NODES\"" >> $LOCAL_TRAF_CONFIG
+#   fi
+   
+#   echo -n "Enter list of all HBase nodes in this cluster (blank separated), 
default [$HBASE_NODES]: "
+#   read answer
+#   if [[ -z "$answer" ]]; then
+#      if [ -z "$HBASE_NODES" ]; then
+#         echo "***ERROR: All nodes in this cluster must be specified."
+#         exit -1
+#      fi
+#   else
+#      HBASE_NODES="$answer"
+#      echo "export HBASE_NODES=\"$HBASE_NODES\"" >> $LOCAL_TRAF_CONFIG
+#   fi
+   
+#   echo -n "Enter list of all HDFS nodes in this cluster (blank separated), 
default [$HDFS_NODES]: "
+#   read answer
+#   if [[ -z "$answer" ]]; then
+#      if [ -z "$HDFS_NODES" ]; then
+#         echo "***ERROR: All nodes in this cluster must be specified."
+#         exit -1
+#      fi
+#   else
+#      HDFS_NODES="$answer"
+#      echo "export HDFS_NODES=\"$HDFS_NODES\"" >> $LOCAL_TRAF_CONFIG
+#   fi
+ 
+#else
+   echo "export MULTI_CLUSTER=\"N\"" >> $LOCAL_TRAF_CONFIG
+#fi
 
-if [[ $validURL -eq "1" ]]; then
-   echo "export URL=\"$URL\"" >> $LOCAL_TRAF_CONFIG
-else
-   echo "***ERROR: Could not access $URL"
-   echo "***ERROR: Check that URL and port are correct or if $HADOOP_TYPE is 
up"
-   exit -1
-fi
 
 #==============================================
 #HDFS Username
 
-echo -n "Enter HDFS username, default is [$HDFS_USER]: "
+echo -n "Enter HDFS username or username running HDFS, default is 
[$HDFS_USER]: "
 read answer
 
 if [ -z $answer ]; then
@@ -473,7 +690,7 @@ fi
 #==============================================
 #HBase user
 
-echo -n "Enter HBase username, default is [$HBASE_USER]: "
+echo -n "Enter HBase username or username running HBase, default is 
[$HBASE_USER]: "
 read answer
 
 if [ -z $answer ]; then
@@ -495,6 +712,18 @@ else
 fi
 
 #==============================================
+#Zookeeper user
+
+echo -n "Enter Zookeeper username or username running Zookeeper, default is 
[$ZOO_USER]: "
+read answer
+
+if [ -z $answer ]; then
+   echo "export ZOO_USER=\"$ZOO_USER\"" >> $LOCAL_TRAF_CONFIG
+else
+   echo "export ZOO_USER=\"$answer\"" >> $LOCAL_TRAF_CONFIG
+fi
+
+#==============================================
 #Install location
 
 if [ -z $SQ_ROOT ]; then
@@ -564,6 +793,8 @@ if [ ! -z $answer ]; then
 fi
 
 echo "export DCS_PRIMARY_MASTER_NODE=\"$DCS_PRIMARY_MASTER_NODE\"" >> 
$LOCAL_TRAF_CONFIG
+echo "export DCS_MASTER_HOST=\"$DCS_PRIMARY_MASTER_NODE\"" >> 
$LOCAL_TRAF_CONFIG
+echo "export DCS_MASTER_PORT=\"$DCS_MASTER_PORT\"" >> $LOCAL_TRAF_CONFIG
 
 #==============================================
 #Enable HA
@@ -598,6 +829,8 @@ if [[ "$ENABLE_HA" == "true" ]]; then
       FLOATING_IP="$answer1"
    fi
    echo "export FLOATING_IP=\"$FLOATING_IP\"" >> $LOCAL_TRAF_CONFIG
+   sed -i '/DCS_MASTER_HOST\=/d' $LOCAL_TRAF_CONFIG
+   echo "export DCS_MASTER_HOST=\"$FLOATING_IP\"" >> $LOCAL_TRAF_CONFIG
 
    ######Get the interface used by the floating IP address
    echo -n "Enter interface used for floating IP address: "

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/install/installer/traf_getHadoopNodes
----------------------------------------------------------------------
diff --git a/install/installer/traf_getHadoopNodes 
b/install/installer/traf_getHadoopNodes
index 0446cb7..26df505 100755
--- a/install/installer/traf_getHadoopNodes
+++ b/install/installer/traf_getHadoopNodes
@@ -81,7 +81,6 @@ if [ $HADOOP_TYPE == "cloudera" ]; then
       HADOOP_NODES="$HADOOP_NODES $hostName"
       MY_HADOOP_NODES="$MY_HADOOP_NODES -w $hostName"
    done < tempFile2
-   rm tempFile
    rm tempFile2
 fi
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/install/installer/traf_getMultiHadoopNodes
----------------------------------------------------------------------
diff --git a/install/installer/traf_getMultiHadoopNodes 
b/install/installer/traf_getMultiHadoopNodes
new file mode 100755
index 0000000..a109dba
--- /dev/null
+++ b/install/installer/traf_getMultiHadoopNodes
@@ -0,0 +1,145 @@
+#!/bin/bash
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+#
+
+source /etc/trafodion/trafodion_config
+TRAF_CONFIG=/etc/trafodion/trafodion_config
+
+
+if [[ $HADOOP_TYPE == "cloudera" ]]; then
+   if [ -d /opt/cloudera/parcels/CDH ]; then
+      export HADOOP_PATH="/opt/cloudera/parcels/CDH/lib/hbase/lib"
+      export HADOOP_BIN_PATH="/opt/cloudera/parcels/CDH/bin"
+   else
+      export HADOOP_PATH="/usr/lib/hbase/lib"
+      export HADOOP_BIN_PATH="/usr/bin"
+   fi
+
+   echo "***INFO: HADOOP_PATH=$HADOOP_PATH"
+   echo "***INFO: HADOOP_BIN_PATH=$HADOOP_BIN_PATH"
+fi
+
+MY_HBASE_NODES=""
+MY_HDFS_NODES=""
+
+if [[ -z $HBASE_NODES ]]; then
+   echo "***ERROR: List of $HADOOP_TYPE HBase nodes not found."
+   exit -1
+fi
+
+for node in $HBASE_NODES
+do
+   MY_HBASE_NODES="$MY_HBASE_NODES -w $node"
+done
+
+if [[ -z $HDFS_NODES ]]; then
+   echo "***ERROR: List of $HADOOP_TYPE HDFS nodes not found."
+   exit -1
+fi
+   
+for node in $HDFS_NODES
+do
+   MY_HDFS_NODES="$MY_HDFS_NODES -w $node"
+done
+
+   
+MY_HADOOP_NODES=""
+
+if [[ -z $HADOOP_NODES ]]; then
+   echo "***ERROR: List of $HADOOP_TYPE nodes not found."
+   exit -1
+fi
+
+
+for node in $HADOOP_NODES
+do
+   MY_HADOOP_NODES="$MY_HADOOP_NODES -w $node"
+done
+
+
+echo "***INFO: $HADOOP_TYPE list of nodes: $HADOOP_NODES"
+echo "***INFO: $HADOOP_TYPE list of HDFS nodes: $HDFS_NODES"
+echo "***INFO: $HADOOP_TYPE list of HBase nodes: $HBASE_NODES"
+
+
+
+hadoop_node_count=$(echo $HADOOP_NODES | wc -w)
+
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/HADOOP_NODES\=/d' $TRAF_CONFIG
+echo "export HADOOP_NODES=\"$HADOOP_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/MY_HADOOP_NODES\=/d' $TRAF_CONFIG
+echo "export MY_HADOOP_NODES=\"$MY_HADOOP_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/HDFS_NODES\=/d' $TRAF_CONFIG
+echo "export HDFS_NODES=\"$HDFS_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/MY_HDFS_NODES\=/d' $TRAF_CONFIG
+echo "export MY_HDFS_NODES=\"$MY_HDFS_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/HBASE_NODES\=/d' $TRAF_CONFIG
+echo "export HBASE_NODES=\"$HBASE_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/MY_HBASE_NODES\=/d' $TRAF_CONFIG
+echo "export MY_HBASE_NODES=\"$MY_HBASE_NODES\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/HADOOP_PATH\=/d' $TRAF_CONFIG
+echo "export HADOOP_PATH=\"$HADOOP_PATH\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/HADOOP_BIN_PATH\=/d' $TRAF_CONFIG
+echo "export HADOOP_BIN_PATH=\"$HADOOP_BIN_PATH\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/hadoop_node_count\=/d' $TRAF_CONFIG
+echo "export hadoop_node_count=\"$hadoop_node_count\"" >> $TRAF_CONFIG
+
+for node in $HADOOP_NODES
+do
+   ssh -q -n $node echo "***INFO: Testing ssh on $node"
+   if [[ $? -ne "0" ]]; then
+      errorFound=1
+      ERROR_NODES="$ERROR_NODES $node"
+   fi
+done
+
+if [[ $errorFound == "1" ]]; then
+   echo "***ERROR: Could not ssh to $ERROR_NODES."
+   echo "***ERROR: Check permissions and known hosts files."
+   exit -1
+fi
+
+for node in $HADOOP_NODES
+do
+   ssh -q -n $node sudo echo "***INFO: Testing sudo access on $node"
+   if [ $? -ne "0" ]; then
+      error=1
+      ERROR_NODES="$ERROR_NODES $node"
+   fi
+done
+
+if [[ $error == "1" ]]; then
+   echo "***ERROR: $ERROR_NODES does not have sudo access."
+   echo "***ERROR: Must have sudo access on all nodes."
+   exit -1
+fi
+
+

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/17cef884/install/installer/trafodion_install
----------------------------------------------------------------------
diff --git a/install/installer/trafodion_install 
b/install/installer/trafodion_install
index 2e25bad..6edca7e 100755
--- a/install/installer/trafodion_install
+++ b/install/installer/trafodion_install
@@ -153,13 +153,18 @@ do
 done
 
 HBASE=$(curl -k -su $ADMIN:$PASSWORD 
$URL/api/v1/clusters/$CLUSTER_NAME/services | grep name | grep hbase | sed -e 
's@[,]@@'g | awk '{print $3}' | sed "s/\"//g")
-
+HDFS=$(curl -k -su $ADMIN:$PASSWORD 
$URL/api/v1/clusters/$CLUSTER_NAME/services | grep name | grep hdfs | sed -e 
's@[,]@@'g | awk '{print $3}' | sed "s/\"//g")
+ZOOKEEPER=$(curl -k -su $ADMIN:$PASSWORD 
$URL/api/v1/clusters/$CLUSTER_NAME/services | grep name | grep zookeeper | sed 
-e 's@[,]@@'g | awk '{print $3}' | sed "s/\"//g")
 
 sudo chmod 777 $TRAF_CONFIG
 sed -i '/hbaseVersion\=/d' $TRAF_CONFIG
 echo "export hbaseVersion=\"$hbaseVersion\"" >> $TRAF_CONFIG
 sed -i '/HBASE\=/d' $TRAF_CONFIG
 echo "export HBASE=\"$HBASE\"" >> $TRAF_CONFIG
+sed -i '/HDFS\=/d' $TRAF_CONFIG
+echo "export HDFS=\"$HDFS\"" >> $TRAF_CONFIG
+sed -i '/ZOOKEEPER\=/d' $TRAF_CONFIG
+echo "export ZOOKEEPER=\"$ZOOKEEPER\"" >> $TRAF_CONFIG
 sed -i '/CDH_VERSION\=/d' $TRAF_CONFIG
 echo "export CDH_VERSION=\"$CDH_VERSION\"" >> $TRAF_CONFIG
 sudo chmod 777 $TRAF_CONFIG
@@ -380,7 +385,6 @@ if [ ${PIPESTATUS[0]} != "0" ]; then
    echo "***ERROR: Exiting..."
    exit -1 
 fi
-
 sudo chmod 777 $TRAF_CONFIG
 sed -i '/INSTALL_LOG\=/d' $TRAF_CONFIG
 echo "export INSTALL_LOG=\"$INSTALL_LOG_CP\"" >> $TRAF_CONFIG
@@ -539,7 +543,9 @@ source $TRAF_CONFIG
 #============================================
 #Check to make sure HBase Version is the correct version
 
-checkHBaseVersion
+if [[ $HADOOP_TYPE != "apache" ]]; then
+   checkHBaseVersion
+fi
 #==============================================
 # Determine Trafodion version by parsing it
 # from the trafodion_server tar file.
@@ -749,6 +755,17 @@ if [[ $MODS_COMPLETE != "Y" ]]; then
          exit -1
       fi
    fi
+   
+    if [[ $HADOOP_TYPE == "apache" ]]; then
+      echo "***INFO: Will run traf_apache_mods" 2>&1 | tee -a $INSTALL_LOG
+      $LOCAL_WORKDIR/traf_apache_mods 2>&1 | tee -a $INSTALL_LOG
+      if [ ${PIPESTATUS[0]}  != "0" ]; then
+         echo "***ERROR: traf_apache_mods exited with error." | tee -a 
$INSTALL_LOG
+         echo "***ERROR: Please check log files." | tee -a $INSTALL_LOG
+         echo "***ERROR: Exiting...." | tee -a $INSTALL_LOG
+         exit -1
+      fi
+   fi
 else
    echo "***INFO: Trafodion Mods has completed successfully in a previous run."
 fi


Reply via email to