Fixes from comments and renamed files

Project: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/repo
Commit: 
http://git-wip-us.apache.org/repos/asf/incubator-trafodion/commit/68eb74de
Tree: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/tree/68eb74de
Diff: http://git-wip-us.apache.org/repos/asf/incubator-trafodion/diff/68eb74de

Branch: refs/heads/master
Commit: 68eb74de9bff40427d8edbab8757d8b653a5c880
Parents: 5785855
Author: Amanda Moran <[email protected]>
Authored: Wed Apr 20 20:41:14 2016 +0000
Committer: Amanda Moran <[email protected]>
Committed: Wed Apr 20 20:41:14 2016 +0000

----------------------------------------------------------------------
 install/installer/traf_apache_mods        |  44 +-
 install/installer/traf_cloudera_mods      | 397 +++++++++++++++++
 install/installer/traf_cloudera_mods98    | 418 -----------------
 install/installer/traf_config_setup       |   8 +-
 install/installer/traf_hortonworks_mods   | 572 ++++++++++++++++++++++++
 install/installer/traf_hortonworks_mods98 | 592 -------------------------
 install/installer/trafodion_install       |  12 +-
 7 files changed, 991 insertions(+), 1052 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/68eb74de/install/installer/traf_apache_mods
----------------------------------------------------------------------
diff --git a/install/installer/traf_apache_mods 
b/install/installer/traf_apache_mods
index 2b27a0d..35bbfcc 100755
--- a/install/installer/traf_apache_mods
+++ b/install/installer/traf_apache_mods
@@ -129,51 +129,31 @@ sudo chown trafodion.trafodion 
$TRAF_WORKDIR/hbase-site.xml
 # create new directories for bulkload and lobs if not already there
 rm $LOCAL_WORKDIR/traf_temp_output 2>/dev/null
 
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir /hbase-staging" 2> $HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir -p /hbase-staging"'
 if [ $? != 0 ]; then
-   # ok if directory already exists
-   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" 
$HOME/traf_temp_output | wc -l')
-   if [ $dir_exists -eq 0 ]; then
-      echo "***ERROR: hds dfs -mkdir /hbase-staging' command failed"
-      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
-      exit -1
-   fi
+   echo "***ERROR: 'hdfs dfs -mkdir -p /hbase-staging' command failed"
+   exit -1
 fi
 ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown -R' "$HBASE_USER"':'"$HBASE_GROUP" 
'/hbase-staging"'
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir /bulkload" 2> $HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir -p /bulkload"'
 if [ $? != 0 ]; then
-   # ok if directory already exists
-   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" 
$HOME/traf_temp_output | wc -l')
-   if [ $dir_exists -eq 0 ]; then
-      echo "***ERROR: 'hdfs dfs -mkdir /bulkload' command failed"
-      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
-      exit -1
-   fi
+   echo "***ERROR: 'hdfs dfs -mkdir -p /bulkload' command failed"
+   exit -1
 fi
 ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command " ' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown -R' "$TRAF_USER"':trafodion /bulkload"'
 
 # Create lobs directory
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir /lobs" 2> $HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir -p /lobs"'
 if [ $? != 0 ]; then
-   # ok if directory already exists
-   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" 
$HOME/traf_temp_output | wc -l')
-   if [ $dir_exists -eq 0 ]; then
-      echo "***ERROR: 'hdfs dfs -mkdir /lobs' command failed"
-      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
-      exit -1
-   fi
+   echo "***ERROR: 'hdfs dfs -mkdir -p /lobs' command failed"
+   exit -1
 fi
 ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown -R' "$TRAF_USER"':trafodion /lobs"'
 
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir /trafodion_backups" 2> 
$HOME/traf_temp_output'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -mkdir -p /trafodion_backups" 2> 
$HOME/traf_temp_output'
 if [ $? != 0 ]; then
-   # ok if directory already exists
-   dir_exists=$(ssh -q -n $HDFS_NODE 'grep "File exists" 
$HOME/traf_temp_output | wc -l')
-   if [ $dir_exists -eq 0 ]; then
-      echo "***ERROR: 'hdfs dfs -mkdir /trafodion_backups' command failed"
-      echo "***ERROR: $(ssh -q -n $HDFS_NODE cat $HOME/traf_temp_output)"
-      exit -1
-   fi
+   echo "***ERROR: 'hdfs dfs -mkdir -p /trafodion_backups' command failed"
+   exit -1
 fi
 ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_PREFIX"'/bin/hdfs dfs -chown -R' "$TRAF_USER"':trafodion 
/trafodion_backups"'
 

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/68eb74de/install/installer/traf_cloudera_mods
----------------------------------------------------------------------
diff --git a/install/installer/traf_cloudera_mods 
b/install/installer/traf_cloudera_mods
new file mode 100755
index 0000000..2f98efa
--- /dev/null
+++ b/install/installer/traf_cloudera_mods
@@ -0,0 +1,397 @@
+#!/bin/bash
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+#
+# This script will configure HBase with HBase-trx 
+# and co-processors needed for Trafodion.  It uses
+# Cloudera Manager's REST api to do this.
+#
+# NOTE: Only for Cloudera installations
+
+TRAF_CONFIG=/etc/trafodion/trafodion_config
+source $TRAF_CONFIG
+export PDSH="pdsh -R exec"
+export PDSH_SSH_CMD="ssh -q -n %h"
+export PDCP="pdcp -R ssh"
+export PDSH_HADOOP_NODES="$PDSH $MY_HBASE_NODES $PDSH_SSH_CMD"
+export PDCP_HADOOP_NODES="$PDCP $MY_HBASE_NODES"
+export HDFS_NODE=$(echo $HDFS_NODES | head -n1 | awk '{print $1;}')
+export HBASE_NODE=$(echo $HBASE_NODES | head -n1 | awk '{print $1;}')
+#=====================================
+# copy Trafodion trx jar to Cloudera's plugins directory on all nodes
+
+cd $UNTAR_DIR
+
+if [[ $CDH_5_3_HDP_2_2_SUPPORT == "N" ]]; then
+   if [[ $CDH_5_4_SUPPORT == "Y" ]] || [[ $CDH_5_5_SUPPORT == "Y" ]]; then
+      if [[ $CDH_VERSION == "5.4" ]]; then
+         hbase_trx_jar="hbase-trx-cdh5_4-*.jar"
+      else
+         hbase_trx_jar="hbase-trx-cdh5_5-*.jar"
+      fi
+   else
+      hbase_trx_jar="hbase-trx-*.jar" 
+   fi 
+else
+    hbase_trx_jar="hbase-trx-cdh5_3-*.jar"
+fi
+
+traf_util_jar="trafodion-utility-*.jar"
+
+
+# The permissions the Trafodion build process creates on the hbase-trx jar
+# files does not work well with the installation process so we change them
+sudo chmod -R 777 $UNTAR_DIR/export/lib
+
+if [ ! -f $UNTAR_DIR/export/lib/$hbase_trx_jar ]; then
+    echo "***ERROR: unable to find $UNTAR_DIR/export/lib/$hbase_trx_jar"
+    exit -1
+fi
+
+# if more than one node then copy to all nodes
+echo "***INFO: copying $hbase_trx_jar to all nodes"
+if [ $node_count -ne 1 ]; then
+    
+    $PDSH_HADOOP_NODES sudo rm -rf $HADOOP_PATH/hbase-trx* 2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf /usr/lib/hbase/lib/hbase-trx* 2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf /usr/share/cmf/lib/plugins/hbase-trx* 
2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf /usr/lib/hbase/lib/trafodion* 2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf /usr/share/cmf/lib/plugins/trafodion* 
2>/dev/null
+    $TRAF_PDSH mkdir -p $LOCAL_WORKDIR 2>/dev/null
+    $PDSH_HADOOP_NODES mkdir -p $LOCAL_WORKDIR 2>/dev/null
+    cp $UNTAR_DIR/export/lib/$hbase_trx_jar $LOCAL_WORKDIR
+    cp $UNTAR_DIR/export/lib/$traf_util_jar $LOCAL_WORKDIR
+    $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$hbase_trx_jar $LOCAL_WORKDIR
+    $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$traf_util_jar $LOCAL_WORKDIR
+    $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$hbase_trx_jar $HADOOP_PATH
+    $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$traf_util_jar $HADOOP_PATH
+    $PDSH_HADOOP_NODES sudo chmod 644 $HADOOP_PATH/$hbase_trx_jar
+    $PDSH_HADOOP_NODES sudo chmod 644 $HADOOP_PATH/$traf_util_jar
+    $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$hbase_trx_jar 2>/dev/null
+    $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$traf_util_jar 2>/dev/null
+else
+    for node in $HBASE_NODES
+    do
+    ssh -q -n $node sudo rm -rf /usr/lib/hbase/lib/hbase-trx* 2>/dev/null
+    ssh -q -n $node sudo rm -rf /usr/share/cmf/lib/plugins/hbase-trx* 
2>/dev/null
+    ssh -q -n $node sudo rm -rf /usr/lib/hbase/lib/trafodion* 2>/dev/null
+    ssh -q -n $node sudo rm -rf /usr/share/cmf/lib/plugins/trafodion* 
2>/dev/null
+    ssh -q -n $node sudo rm -rf $HADOOP_PATH/hbase-trx* 2>/dev/null
+    ssh -q -n $node sudo mkdir -p $TRAF_WORKDIR 2>/dev/null
+    ssh -q -n $node sudo chmod 777 $TRAF_WORKDIR
+    scp -q $UNTAR_DIR/export/lib/$hbase_trx_jar $(whoami)@$node:$TRAF_WORKDIR
+    scp -q $UNTAR_DIR/export/lib/$traf_util_jar $(whoami)@$node:$TRAF_WORKDIR
+    ssh -q -n $node sudo cp $TRAF_WORKDIR/$hbase_trx_jar $HADOOP_PATH
+    ssh -q -n $node sudo cp $TRAF_WORKDIR/$traf_util_jar $HADOOP_PATH
+    ssh -q -n $node sudo chmod 644 $HADOOP_PATH/$hbase_trx_jar
+    ssh -q -n $node sudo chmod 644 $HADOOP_PATH/$traf_util_jar
+    done
+fi
+
+#====================================
+#Make sure hbase-trx*jar got copied
+
+for node in $HBASE_NODES
+do
+   copiedOver=$(ssh -q -n $node sudo ls $HADOOP_PATH/hbase-trx* | wc -l)
+   if [[ $copiedOver -ne "1" ]]; then
+      echo "***ERROR: $hbase_trx_jar was not copied on $node"
+      echo "***ERROR: Please investigate why this happened"
+      echo "***ERROR: Trafodion can not start without this. EXITING..."
+      exit -1
+   fi
+done
+
+echo "***INFO: $hbase_trx_jar copied correctly! Huzzah."
+
+
+
+#=====================================
+# create new directories for bulkload and lobs if not already there
+rm $LOCAL_WORKDIR/traf_temp_output 2>/dev/null
+
+#Copy hbase-site.xml file
+ssh -q -n $HBASE_NODE sudo cp /etc/hbase/conf/hbase-site.xml $HOME
+ssh -q -n $HBASE_NODE sudo chown "$(whoami)"."$(whoami)" "$HOME"/hbase-site.xml
+ssh -q -n $HBASE_NODE sudo chmod 777 $HOME/hbase-site.xml
+
+scp -q $(whoami)@$HBASE_NODE:$HOME/hbase-site.xml $HOME
+if [[ $? -gt 1 ]]; then
+   echo "***ERROR: Unable to find /etc/hbase/conf/hbase-site.xml file on 
$HBASE_NODE or unable to copy."
+   exit -1
+fi
+
+sudo cp $HOME/hbase-site.xml $TRAF_WORKDIR
+sudo chown trafodion.trafodion $TRAF_WORKDIR/hbase-site.xml
+
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -mkdir -p /hbase-staging"'
+if [ $? != 0 ]; then
+   echo "***ERROR: '$HADOOP_BIN_PATH/hadoop fs -mkdir -p /hbase-staging' 
command failed"
+   exit -1
+fi
+
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$HBASE_USER"':'"$HBASE_GROUP"' 
/hbase-staging"'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -mkdir -p /bulkload"'
+if [ $? != 0 ]; then
+   echo "***ERROR: '$HADOOP_BIN_PATH/hadoop fs -mkdir -p /bulkload' command 
failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$TRAF_USER"':trafodion /bulkload"'
+
+# Create lobs directory
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command 
"'"$HADOOP_BIN_PATH"'/hadoop fs -mkdir -p /lobs"'
+if [ $? != 0 ]; then
+   echo "***ERROR: '$HADOOP_BIN_PATH/hadoop fs -mkdir -p /lobs' command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command 
"'"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$TRAF_USER"':trafodion /lobs"'
+
+
+#Create Backup directory 
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -mkdir -p /trafodion_backups"'
+if [ $? != 0 ]; then
+   echo "***ERROR: '$HADOOP_BIN_PATH/hadoop fs -mkdir -p /trafodion_backups' 
command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command 
"'"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$TRAF_USER"':trafodion 
/trafodion_backups"'
+
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -chmod 777 /trafodion_backups"'
+#=====================================
+# Modify hadoop settings as needed by Trafodion
+
+rm $HOME/traf_hdfs1_config_temp 2> /dev/null
+rm $HOME/traf_hbase_config_temp 2> /dev/null
+
+# change the hdfs configuration using Cloudera's REST API
+curl -k -X PUT -H 'Content-Type:application/json' -u $ADMIN:$PASSWORD  --data \
+'{ "roleTypeConfigs" :  [ {
+        "roleType" : "NAMENODE",
+        "items": [ {
+                "name" : "namenode_java_heapsize",
+        "value" : "1073741824"
+                } ]
+   }, {
+        "roleType" : "SECONDARYNAMENODE",
+        "items":[ {
+                "name" : "secondary_namenode_java_heapsize",
+        "value" : "1073741824"
+                } ]
+     } ],
+    "items": [ {
+             "name":"dfs_namenode_acls_enabled",
+             "value":"true"
+             } ]
+}' \
+$URL/api/v1/clusters/$CLUSTER_NAME/services/$HDFS/config > 
$LOCAL_WORKDIR/traf_hdfs_config_temp
+
+if [ $? != 0 ]; then
+    echo "***ERROR: Unable to modify HDFS configuration through Cloudera's 
REST API."
+    echo "***ERROR: Check if Cloudera URL is correct, may need to enter 
external IP address."
+    echo "***ERROR: Check if iptables/firewall is configured correctly and ports are enabled."
+    echo "***ERROR: Check that HDFS is running without error."
+    exit -1
+fi
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $LOCAL_WORKDIR/traf_hdfs_config_temp | grep Error | wc 
-l)
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to modify hdfs configuration through Cloudera's 
REST API."
+    echo "***ERROR: Check if Cloudera URL is correct, may need to enter external IP address."
+    echo "***ERROR: Check if iptables/firewall is configured correctly and ports are enabled."  2>&1 
+    echo "***ERROR: Check that HDFS is running without error."
+    exit -1
+fi
+
+rm $LOCAL_WORKDIR/traf_hdfs_config_temp 2> /dev/null
+
+# change the hbase configuration using Cloudera Manager's REST api
+# NOTE: hbase.regionserver.lease.period is used as it is equivalent to
+#       hbase.client.scanner.timeout.period and Cloudera only allows
+#       hbase.regionserver.lease.period to be set through the REST API.
+curl -k -X PUT -H 'Content-Type:application/json' -u $ADMIN:$PASSWORD  --data \
+'{ "roleTypeConfigs" : [ {
+       "roleType" : "MASTER",
+       "items" : [ { 
+               "name" : "hbase_master_config_safety_valve", 
+        "value" : "<property>\r\n   
<name>hbase.master.distributed.log.splitting</name>\r\n   
<value>false</value>\r\n</property>\r\n <property>\r\n   
<name>hbase.snapshot.master.timeoutMillis</name>\r\n   
<value>600000</value>\r\n</property>\r\n"
+               } ]
+    }, {
+       "roleType" : "REGIONSERVER", 
+       "items" : [ { 
+               "name" : "hbase_coprocessor_region_classes", 
+                "value" : 
"org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation"
+               }, {
+               "name" : "hbase_regionserver_lease_period", 
+               "value" : "600000"
+               }, {
+               "name" : "hbase_regionserver_config_safety_valve", 
+               "value" : "<property>\r\n   <name>hbase.hregion.impl</name>\r\n 
  
<value>org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion</value>\r\n</property>\r\n
 <property>\r\n   <name>hbase.regionserver.region.split.policy</name>\r\n   
<value>org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy</value>\r\n</property>\r\n
  <property>\r\n   <name>hbase.snapshot.enabled</name>\r\n   
<value>true</value>\r\n</property>\r\n <property>\r\n   
<name>hbase.bulkload.staging.dir</name>\r\n   
<value>/hbase-staging</value>\r\n</property>\r\n <property>\r\n   
<name>hbase.regionserver.region.transactional.tlog</name>\r\n   
<value>true</value>\r\n</property>\r\n <property>\r\n   
<name>hbase.snapshot.region.timeout</name>\r\n   
<value>600000</value>\r\n</property>\r\n "
+               } ] 
+       } ] 
+}' \
+$URL/api/v1/clusters/$CLUSTER_NAME/services/$HBASE/config > 
$LOCAL_WORKDIR/traf_hbase_config_temp
+
+if [ $? != 0 ]; then
+    echo "***ERROR: Unable to modify HBase configuration through Cloudera's 
REST API."
+    echo "***ERROR: Check that HBase is running without error."
+    exit -1
+fi
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $LOCAL_WORKDIR/traf_hbase_config_temp | grep Error | 
wc -l)
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to modify HBase configuration through Cloudera's 
REST API."
+    echo "***ERROR: Check that HBase is running without error."
+    cat $LOCAL_WORKDIR/traf_hbase_config_temp
+    exit -1
+fi
+curl_error=$(grep message $LOCAL_WORKDIR/traf_hbase_config_temp | wc -l)
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to modify HBase configuration through Cloudera's 
REST API."
+    echo "***ERROR: Check that HBase is running without error."
+    cat $LOCAL_WORKDIR/traf_hbase_config_temp
+    exit -1
+fi
+rm $LOCAL_WORKDIR/traf_hbase_config_temp 2> /dev/null
+
+# Change zookeeper config using Cloudera REST API
+
+curl -k -X PUT -H 'Content-Type:application/json' -u $ADMIN:$PASSWORD  --data \
+'{ "roleTypeConfigs" :  [ {
+        "roleType" : "SERVER",
+        "items": [ {
+           "name" : "maxClientCnxns",
+           "value" : "0"
+           } ]
+        } ]
+
+}' \
+$URL/api/v1/clusters/$CLUSTER_NAME/services/$ZOOKEEPER/config > 
$LOCAL_WORKDIR/traf_zookeeper_config_temp
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $LOCAL_WORKDIR/traf_zookeeper_config_temp | grep Error 
| wc -l)
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to modify Zookeeper configuration through 
Cloudera's REST API."
+    echo "***ERROR: Check that Zookeeper is running without error."
+    cat $LOCAL_WORKDIR/traf_zookeeper_config_temp
+    exit -1
+fi
+curl_error=$(grep message $LOCAL_WORKDIR/traf_zookeeper_config_temp | wc -l)
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to modify Zookeeper configuration through 
Cloudera's REST API."
+    echo "***ERROR: Check that Zookeeper is running without error."
+    cat $LOCAL_WORKDIR/traf_zookeeper_config_temp
+    exit -1
+fi
+rm $LOCAL_WORKDIR/traf_zookeeper_config_temp 2> /dev/null
+
+
+#=====================================
+# restart Cloudera to pick up all the changes just made
+poll_time=30
+echo "***INFO: restarting Hadoop to pickup Trafodion transaction jar"
+echo "***INFO: ...polling every $poll_time seconds until restart is completed."
+restart_info=$(curl -k -X POST -u $ADMIN:$PASSWORD \
+    $URL/api/v1/clusters/$CLUSTER_NAME/commands/restart)
+echo $restart_info
+command_id=$(echo $restart_info | grep id | awk '{print $4}' | sed -e 's@,@@' )
+echo "***DEBUG: Cloudera command_id=$command_id"
+
+# poll until restart is completed as a restart can take a while
+active=1
+while [ $active -ne 0 ]; do
+    sleep $poll_time
+    curl -k -u $ADMIN:$PASSWORD \
+        $URL/api/v1/commands/$command_id \
+        > $LOCAL_WORKDIR/hbase_restart_status_temp
+    cat $LOCAL_WORKDIR/hbase_restart_status_temp
+    echo "***INFO: ...polling every $poll_time seconds until restart is 
completed."
+    # if restart command is still active, then active will not equal 0
+    active=$(cat $LOCAL_WORKDIR/hbase_restart_status_temp | grep '"active" : 
true' | wc -l)
+done
+
+# make sure restart completed successfully
+failures=$(cat $LOCAL_WORKDIR/hbase_restart_status_temp | grep '"success" : 
false' | wc -l)
+if [ $failures -ne 0 ]; then
+    echo "***ERROR: Unable to restart Hadoop."
+    exit -1
+fi
+
+echo "***INFO: Hadoop restart completed successfully"
+
+# wait to make sure HDFS is fully restarted and out of safemode
+echo "***INFO: waiting for HDFS to exit safemode"
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs 
dfsadmin -safemode wait"'
+
+#====================================================
+# NOTE: These command must be done AFTER acls are 
+#       enabled and HDFS has been restarted
+echo "***INFO: Setting HDFS ACLs for snapshot scan support"
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-mkdir -p /hbase/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -mkdir -p /hbase/archive) 
command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-chown hbase:hbase /hbase/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -chown hbase:hbase 
/hbase/archive) command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-setfacl -R -m user:'"$TRAF_USER"':rwx /hbase/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m 
user:$TRAF_USER:rwx /hbase/archive) command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-setfacl -R -m default:user:'"$TRAF_USER"':rwx /hbase/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m 
default:user:$TRAF_USER:rwx /hbase/archive) command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-setfacl -R -m mask::rwx /hbase/archive"'
+if [ $? != 0 ]; then
+   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m mask::rwx 
/hbase/archive) command failed"
+   exit -1
+fi
+
+MODS_COMPLETE="Y"
+sudo chmod 777 $TRAF_CONFIG
+sed -i '/MODS_COMPLETE\=/d' $TRAF_CONFIG
+echo "export MODS_COMPLETE=\"$MODS_COMPLETE\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+source $TRAF_CONFIG
+
+TRAF_CONFIG_FILE="trafodion_config"
+TRAF_CONFIG_DIR="/etc/trafodion"
+
+if [ $node_count -ne 1 ]; then
+   cp $TRAF_CONFIG $LOCAL_WORKDIR
+   $TRAF_PDCP $LOCAL_WORKDIR/$TRAF_CONFIG_FILE $HOME
+   $TRAF_PDSH sudo mkdir -p $TRAF_CONFIG_DIR
+   $TRAF_PDSH sudo cp -rf $HOME/$TRAF_CONFIG_FILE $TRAF_CONFIG_DIR
+   $TRAF_PDSH sudo chmod 777 $TRAF_CONFIG
+fi
+

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/68eb74de/install/installer/traf_cloudera_mods98
----------------------------------------------------------------------
diff --git a/install/installer/traf_cloudera_mods98 
b/install/installer/traf_cloudera_mods98
deleted file mode 100755
index 5559f49..0000000
--- a/install/installer/traf_cloudera_mods98
+++ /dev/null
@@ -1,418 +0,0 @@
-#!/bin/bash
-# @@@ START COPYRIGHT @@@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# @@@ END COPYRIGHT @@@
-#
-# This script will configure HBase with HBase-trx 
-# and co-processors needed for Trafodion.  It uses
-# Cloudera Manager's REST api to do this.
-#
-# NOTE: Only for Cloudera installations
-
-TRAF_CONFIG=/etc/trafodion/trafodion_config
-source $TRAF_CONFIG
-export PDSH="pdsh -R exec"
-export PDSH_SSH_CMD="ssh -q -n %h"
-export PDCP="pdcp -R ssh"
-export PDSH_HADOOP_NODES="$PDSH $MY_HBASE_NODES $PDSH_SSH_CMD"
-export PDCP_HADOOP_NODES="$PDCP $MY_HBASE_NODES"
-export HDFS_NODE=$(echo $HDFS_NODES | head -n1 | awk '{print $1;}')
-export HBASE_NODE=$(echo $HBASE_NODES | head -n1 | awk '{print $1;}')
-#=====================================
-# copy Trafodion trx jar to Cloudera's plugins directory on all nodes
-
-cd $UNTAR_DIR
-
-# determine java version for hbase 0.98 only java 1.7 is allowed
-if [[ $CDH_5_3_HDP_2_2_SUPPORT == "N" ]]; then
-   if [[ $CDH_5_4_SUPPORT == "Y" ]] || [[ $CDH_5_5_SUPPORT == "Y" ]]; then
-      if [[ $CDH_VERSION == "5.4" ]]; then
-         hbase_trx_jar="hbase-trx-cdh5_4-*.jar"
-      else
-         hbase_trx_jar="hbase-trx-cdh5_5-*.jar"
-      fi
-   else
-      hbase_trx_jar="hbase-trx-*.jar" 
-   fi 
-else
-    hbase_trx_jar="hbase-trx-cdh5_3-*.jar"
-fi
-
-traf_util_jar="trafodion-utility-*.jar"
-
-
-# The permissions the Trafodion build process creates on the hbase-trx jar
-# files does not work well with the installation process so we change them
-sudo chmod -R 777 $UNTAR_DIR/export/lib
-
-if [ ! -f $UNTAR_DIR/export/lib/$hbase_trx_jar ]; then
-    echo "***ERROR: unable to find $UNTAR_DIR/export/lib/$hbase_trx_jar"
-    exit -1
-fi
-
-# if more than one node then copy to all nodes
-echo "***INFO: copying $hbase_trx_jar to all nodes"
-if [ $node_count -ne 1 ]; then
-    
-    $PDSH_HADOOP_NODES sudo rm -rf $HADOOP_PATH/hbase-trx* 2>/dev/null
-    $PDSH_HADOOP_NODES sudo rm -rf /usr/lib/hbase/lib/hbase-trx* 2>/dev/null
-    $PDSH_HADOOP_NODES sudo rm -rf /usr/share/cmf/lib/plugins/hbase-trx* 
2>/dev/null
-    $PDSH_HADOOP_NODES sudo rm -rf /usr/lib/hbase/lib/trafodion* 2>/dev/null
-    $PDSH_HADOOP_NODES sudo rm -rf /usr/share/cmf/lib/plugins/trafodion* 
2>/dev/null
-    $TRAF_PDSH mkdir -p $LOCAL_WORKDIR 2>/dev/null
-    $PDSH_HADOOP_NODES mkdir -p $LOCAL_WORKDIR 2>/dev/null
-    cp $UNTAR_DIR/export/lib/$hbase_trx_jar $LOCAL_WORKDIR
-    cp $UNTAR_DIR/export/lib/$traf_util_jar $LOCAL_WORKDIR
-    $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$hbase_trx_jar $LOCAL_WORKDIR
-    $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$traf_util_jar $LOCAL_WORKDIR
-    $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$hbase_trx_jar $HADOOP_PATH
-    $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$traf_util_jar $HADOOP_PATH
-    $PDSH_HADOOP_NODES sudo chmod 644 $HADOOP_PATH/$hbase_trx_jar
-    $PDSH_HADOOP_NODES sudo chmod 644 $HADOOP_PATH/$traf_util_jar
-    $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$hbase_trx_jar 2>/dev/null
-    $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$traf_util_jar 2>/dev/null
-else
-    for node in $HBASE_NODES
-    do
-    ssh -q -n $node sudo rm -rf /usr/lib/hbase/lib/hbase-trx* 2>/dev/null
-    ssh -q -n $node sudo rm -rf /usr/share/cmf/lib/plugins/hbase-trx* 
2>/dev/null
-    ssh -q -n $node sudo rm -rf /usr/lib/hbase/lib/trafodion* 2>/dev/null
-    ssh -q -n $node sudo rm -rf /usr/share/cmf/lib/plugins/trafodion* 
2>/dev/null
-    ssh -q -n $node sudo rm -rf $HADOOP_PATH/hbase-trx* 2>/dev/null
-    ssh -q -n $node sudo mkdir -p $TRAF_WORKDIR 2>/dev/null
-    ssh -q -n $node sudo chmod 777 $TRAF_WORKDIR
-    scp -q $UNTAR_DIR/export/lib/$hbase_trx_jar $(whoami)@$node:$TRAF_WORKDIR
-    scp -q $UNTAR_DIR/export/lib/$traf_util_jar $(whoami)@$node:$TRAF_WORKDIR
-    ssh -q -n $node sudo cp $TRAF_WORKDIR/$hbase_trx_jar $HADOOP_PATH
-    ssh -q -n $node sudo cp $TRAF_WORKDIR/$traf_util_jar $HADOOP_PATH
-    ssh -q -n $node sudo chmod 644 $HADOOP_PATH/$hbase_trx_jar
-    ssh -q -n $node sudo chmod 644 $HADOOP_PATH/$traf_util_jar
-    done
-fi
-
-#====================================
-#Make sure hbase-trx*jar got copied
-
-for node in $HBASE_NODES
-do
-   copiedOver=$(ssh -q -n $node sudo ls $HADOOP_PATH/hbase-trx* | wc -l)
-   if [[ $copiedOver -ne "1" ]]; then
-      echo "***ERROR: $hbase_trx_jar was not copied on $node"
-      echo "***ERROR: Please investigate why this happened"
-      echo "***ERROR: Trafodion can not start without this. EXITING..."
-      exit -1
-   fi
-done
-
-echo "***INFO: $hbase_trx_jar copied correctly! Huzzah."
-
-
-
-#=====================================
-# create new directories for bulkload and lobs if not already there
-rm $LOCAL_WORKDIR/traf_temp_output 2>/dev/null
-
-#Copy hbase-site.xml file
-ssh -q -n $HBASE_NODE sudo cp /etc/hbase/conf/hbase-site.xml $HOME
-ssh -q -n $HBASE_NODE sudo chown "$(whoami)"."$(whoami)" "$HOME"/hbase-site.xml
-ssh -q -n $HBASE_NODE sudo chmod 777 $HOME/hbase-site.xml
-
-scp -q $(whoami)@$HBASE_NODE:$HOME/hbase-site.xml $HOME
-if [[ $? -gt 1 ]]; then
-   echo "***ERROR: Unable to find /etc/hbase/conf/hbase-site.xml file on 
$HBASE_NODE or unable to copy."
-   exit -1
-fi
-
-sudo cp $HOME/hbase-site.xml $TRAF_WORKDIR
-sudo chown trafodion.trafodion $TRAF_WORKDIR/hbase-site.xml
-
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -mkdir /hbase-staging" 2> $HOME/traf_temp_output'
-if [ $? != 0 ]; then
-   # ok if directory already exists
-   dir_exists=$(grep "File exists" $HOME/traf_temp_output | wc -l)
-   if [ $dir_exists -eq 0 ]; then
-      echo "***ERROR: '$HADOOP_BIN_PATH/hadoop fs -mkdir /hbase-staging' 
command failed"
-      echo "***ERROR: $(cat $HOME/traf_temp_output)"
-      exit -1
-   fi
-fi
-
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$HBASE_USER"':'"$HBASE_GROUP"' 
/hbase-staging"'
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -mkdir /bulkload" 2> $HOME/traf_temp_output'
-if [ $? != 0 ]; then
-   # ok if directory already exists
-   dir_exists=$(grep "File exists" $HOME/traf_temp_output | wc -l)
-   if [ $dir_exists -eq 0 ]; then
-      echo "***ERROR: '$HADOOP_BIN_PATH/hadoop fs -mkdir /bulkload' command 
failed"
-      echo "***ERROR: $(cat $HOME/traf_temp_output)"
-      exit -1
-   fi
-fi
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$TRAF_USER"':trafodion /bulkload"'
-
-# Create lobs directory
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command 
"'"$HADOOP_BIN_PATH"'/hadoop fs -mkdir /lobs" 2> $HOME/traf_temp_output'
-if [ $? != 0 ]; then
-   # ok if directory already exists
-   dir_exists=$(grep "File exists" $HOME/traf_temp_output | wc -l)
-   if [ $dir_exists -eq 0 ]; then
-      echo "***ERROR: '$HADOOP_BIN_PATH/hadoop fs -mkdir /lobs' command failed"
-      echo "***ERROR: $(cat $LOCAL_WORKDIR/traf_temp_output)"
-      exit -1
-   fi
-fi
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command 
"'"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$TRAF_USER"':trafodion /lobs"'
-
-
-#Create Backup directory 
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -mkdir /trafodion_backups" 2> 
$HOME/traf_temp_output'
-if [ $? != 0 ]; then
-   # ok if directory already exists
-   dir_exists=$(grep "File exists" $HOME/traf_temp_output | wc -l)
-   if [ $dir_exists -eq 0 ]; then
-      echo "***ERROR: '$HADOOP_BIN_PATH/hadoop fs -mkdir /trafodion_backups' 
command failed"
-      echo "***ERROR: $(cat $LOCAL_WORKDIR/traf_temp_output)"
-      exit -1
-   fi
-fi
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command 
"'"$HADOOP_BIN_PATH"'/hadoop fs -chown -R '"$TRAF_USER"':trafodion 
/trafodion_backups"'
-
-ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "' 
"$HADOOP_BIN_PATH"'/hadoop fs -chmod 777 /trafodion_backups"'
-#=====================================
-# Modify hadoop settings as needed by Trafodion
-
-rm $HOME/traf_hdfs1_config_temp 2> /dev/null
-rm $HOME/traf_hbase_config_temp 2> /dev/null
-
-# change the hdfs configuration using Cloudera's REST API
-curl -k -X PUT -H 'Content-Type:application/json' -u $ADMIN:$PASSWORD  --data \
-'{ "roleTypeConfigs" :  [ {
-        "roleType" : "NAMENODE",
-        "items": [ {
-                "name" : "namenode_java_heapsize",
-        "value" : "1073741824"
-                } ]
-   }, {
-        "roleType" : "SECONDARYNAMENODE",
-        "items":[ {
-                "name" : "secondary_namenode_java_heapsize",
-        "value" : "1073741824"
-                } ]
-     } ],
-    "items": [ {
-             "name":"dfs_namenode_acls_enabled",
-             "value":"true"
-             } ]
-}' \
-$URL/api/v1/clusters/$CLUSTER_NAME/services/$HDFS/config > 
$LOCAL_WORKDIR/traf_hdfs_config_temp
-
-if [ $? != 0 ]; then
-    echo "***ERROR: Unable to modify HDFS configuration through Cloudera's 
REST API."
-    echo "***ERROR: Check if Cloudera URL is correct, may need to enter 
external IP address."
-    echo "***ERROR: Check if iptables/firewall is configured correctly and 
ports a
-    re enabled."
-    echo "***ERROR: Check that HDFS is running without error."
-    exit -1
-fi
-
-# in most cases curl does not return an error
-# so curl's actual output needs to be checked, too
-curl_error=$(grep TITLE $LOCAL_WORKDIR/traf_hdfs_config_temp | grep Error | wc 
-l)
-if [ $curl_error -ne 0 ]; then
-    echo "***ERROR: Unable to modify hdfs configuration through Cloudera's 
REST API."
-    echo "***ERROR: Check if Cloudera URL is correct, may need to enter 
external IP address
-."
-    echo "***ERROR: Check if iptables/firewall is configured correctly and 
ports a
-    re enabled."  2>&1 
-    echo "***ERROR: Check that HDFS is running without error."
-    exit -1
-fi
-
-rm $LOCAL_WORKDIR/traf_hdfs_config_temp 2> /dev/null
-
-# change the hbase configuration using Cloudera Manager's REST api
-# NOTE: hbase.regionserver.lease.period is used as it is equivalent to
-#       hbase.client.scanner.timeout.period and Cloudera only allows
-#       hbase.regionserver.lease.period to be set through the REST API.
-curl -k -X PUT -H 'Content-Type:application/json' -u $ADMIN:$PASSWORD  --data \
-'{ "roleTypeConfigs" : [ {
-       "roleType" : "MASTER",
-       "items" : [ { 
-               "name" : "hbase_master_config_safety_valve", 
-        "value" : "<property>\r\n   
<name>hbase.master.distributed.log.splitting</name>\r\n   
<value>false</value>\r\n</property>\r\n <property>\r\n   
<name>hbase.snapshot.master.timeoutMillis</name>\r\n   
<value>600000</value>\r\n</property>\r\n"
-               } ]
-    }, {
-       "roleType" : "REGIONSERVER", 
-       "items" : [ { 
-               "name" : "hbase_coprocessor_region_classes", 
-                "value" : 
"org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation"
-               }, {
-               "name" : "hbase_regionserver_lease_period", 
-               "value" : "600000"
-               }, {
-               "name" : "hbase_regionserver_config_safety_valve", 
-               "value" : "<property>\r\n   <name>hbase.hregion.impl</name>\r\n 
  
<value>org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion</value>\r\n</property>\r\n
 <property>\r\n   <name>hbase.regionserver.region.split.policy</name>\r\n   
<value>org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy</value>\r\n</property>\r\n
  <property>\r\n   <name>hbase.snapshot.enabled</name>\r\n   
<value>true</value>\r\n</property>\r\n <property>\r\n   
<name>hbase.bulkload.staging.dir</name>\r\n   
<value>/hbase-staging</value>\r\n</property>\r\n <property>\r\n   
<name>hbase.regionserver.region.transactional.tlog</name>\r\n   
<value>true</value>\r\n</property>\r\n <property>\r\n   
<name>hbase.snapshot.region.timeout</name>\r\n   
<value>600000</value>\r\n</property>\r\n "
-               } ] 
-       } ] 
-}' \
-$URL/api/v1/clusters/$CLUSTER_NAME/services/$HBASE/config > 
$LOCAL_WORKDIR/traf_hbase_config_temp
-
-if [ $? != 0 ]; then
-    echo "***ERROR: Unable to modify HBase configuration through Cloudera's 
REST API."
-    echo "***ERROR: Check that HBase is running without error."
-    exit -1
-fi
-
-# in most cases curl does not return an error
-# so curl's actual output needs to be checked, too
-curl_error=$(grep TITLE $LOCAL_WORKDIR/traf_hbase_config_temp | grep Error | 
wc -l)
-if [ $curl_error -ne 0 ]; then
-    echo "***ERROR: Unable to modify HBase configuration through Cloudera's 
REST API."
-    echo "***ERROR: Check that HBase is running without error."
-    cat $LOCAL_WORKDIR/traf_hbase_config_temp
-    exit -1
-fi
-curl_error=$(grep message $LOCAL_WORKDIR/traf_hbase_config_temp | wc -l)
-if [ $curl_error -ne 0 ]; then
-    echo "***ERROR: Unable to modify HBase configuration through Cloudera's 
REST API."
-    echo "***ERROR: Check that HBase is running without error."
-    cat $LOCAL_WORKDIR/traf_hbase_config_temp
-    exit -1
-fi
-rm $LOCAL_WORKDIR/traf_hbase_config_temp 2> /dev/null
-
-# Change zookeeper config using Cloudera REST API
-
-curl -k -X PUT -H 'Content-Type:application/json' -u $ADMIN:$PASSWORD  --data \
-'{ "roleTypeConfigs" :  [ {
-        "roleType" : "SERVER",
-        "items": [ {
-           "name" : "maxClientCnxns",
-           "value" : "0"
-           } ]
-        } ]
-
-}' \
-$URL/api/v1/clusters/$CLUSTER_NAME/services/$ZOOKEEPER/config > 
$LOCAL_WORKDIR/traf_zookeeper_config_temp
-
-# in most cases curl does not return an error
-# so curl's actual output needs to be checked, too
-curl_error=$(grep TITLE $LOCAL_WORKDIR/traf_zookeeper_config_temp | grep Error 
| wc -l)
-if [ $curl_error -ne 0 ]; then
-    echo "***ERROR: Unable to modify Zookeeper configuration through 
Cloudera's REST API."
-    echo "***ERROR: Check that Zookeeper is running without error."
-    cat $LOCAL_WORKDIR/traf_zookeeper_config_temp
-    exit -1
-fi
-curl_error=$(grep message $LOCAL_WORKDIR/traf_zookeeper_config_temp | wc -l)
-if [ $curl_error -ne 0 ]; then
-    echo "***ERROR: Unable to modify Zookeeper configuration through 
Cloudera's REST API."
-    echo "***ERROR: Check that Zookeeper is running without error."
-    cat $LOCAL_WORKDIR/traf_zookeeper_config_temp
-    exit -1
-fi
-rm $LOCAL_WORKDIR/traf_zookeeper_config_temp 2> /dev/null
-
-
-#=====================================
-# restart Cloudera to pick up all the changes just made
-poll_time=30
-echo "***INFO: restarting Hadoop to pickup Trafodion transaction jar"
-echo "***INFO: ...polling every $poll_time seconds until restart is completed."
-restart_info=$(curl -k -X POST -u $ADMIN:$PASSWORD \
-    $URL/api/v1/clusters/$CLUSTER_NAME/commands/restart)
-echo $restart_info
-command_id=$(echo $restart_info | grep id | awk '{print $4}' | sed -e 's@,@@' )
-echo "***DEBUG: Cloudera command_id=$command_id"
-
-# poll until restart is completed as a restart can take a while
-active=1
-while [ $active -ne 0 ]; do
-    sleep $poll_time
-    curl -k -u $ADMIN:$PASSWORD \
-        $URL/api/v1/commands/$command_id \
-        > $LOCAL_WORKDIR/hbase_restart_status_temp
-    cat $LOCAL_WORKDIR/hbase_restart_status_temp
-    echo "***INFO: ...polling every $poll_time seconds until restart is 
completed."
-    # if restart command is still active, then active will not equal 0
-    active=$(cat $LOCAL_WORKDIR/hbase_restart_status_temp | grep '"active" : 
true' | wc -l)
-done
-
-# make sure restart completed successfully
-failures=$(cat $LOCAL_WORKDIR/hbase_restart_status_temp | grep '"success" : 
false' | wc -l)
-if [ $failures -ne 0 ]; then
-    echo "***ERROR: Unable to restart Hadoop."
-    exit -1
-fi
-
-echo "***INFO: Hadoop restart completed successfully"
-
-# wait to make sure HDFS is fully restarted and out of safemode
-echo "***INFO: waiting for HDFS to exit safemode"
-ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs 
dfsadmin -safemode wait"'
-
-#====================================================
-# NOTE: These command must be done AFTER acls are 
-#       enabled and HDFS has been restarted
-echo "***INFO: Setting HDFS ACLs for snapshot scan support"
-ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-mkdir -p /hbase/archive"'
-if [ $? != 0 ]; then
-   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -mkdir -p /hbase/archive) 
command failed"
-   exit -1
-fi
-ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-chown hbase:hbase /hbase/archive"'
-if [ $? != 0 ]; then
-   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -chown hbase:hbase 
/hbase/archive) command failed"
-   exit -1
-fi
-ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-setfacl -R -m user:'"$TRAF_USER"':rwx /hbase/archive"'
-if [ $? != 0 ]; then
-   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m 
user:$TRAF_USER:rwx /hbase/archive) command failed"
-   exit -1
-fi
-ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-setfacl -R -m default:user:'"$TRAF_USER"':rwx /hbase/archive"'
-if [ $? != 0 ]; then
-   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m 
default:user:$TRAF_USER:rwx /hbase/archive) command failed"
-   exit -1
-fi
-ssh -q -n $HDFS_NODE 'sudo su hdfs --command "' "$HADOOP_BIN_PATH"'/hdfs dfs 
-setfacl -R -m mask::rwx /hbase/archive"'
-if [ $? != 0 ]; then
-   echo "***ERROR: ($HADOOP_BIN_PATH/hdfs dfs -setfacl -R -m mask::rwx 
/hbase/archive) command failed"
-   exit -1
-fi
-
-MODS_COMPLETE="Y"
-sudo chmod 777 $TRAF_CONFIG
-sed -i '/MODS_COMPLETE\=/d' $TRAF_CONFIG
-echo "export MODS_COMPLETE=\"$MODS_COMPLETE\"" >> $TRAF_CONFIG
-sudo chmod 777 $TRAF_CONFIG
-source $TRAF_CONFIG
-
-TRAF_CONFIG_FILE="trafodion_config"
-TRAF_CONFIG_DIR="/etc/trafodion"
-
-if [ $node_count -ne 1 ]; then
-   cp $TRAF_CONFIG $LOCAL_WORKDIR
-   $TRAF_PDCP $LOCAL_WORKDIR/$TRAF_CONFIG_FILE $HOME
-   $TRAF_PDSH sudo mkdir -p $TRAF_CONFIG_DIR
-   $TRAF_PDSH sudo cp -rf $HOME/$TRAF_CONFIG_FILE $TRAF_CONFIG_DIR
-   $TRAF_PDSH sudo chmod 777 $TRAF_CONFIG
-fi
-

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/68eb74de/install/installer/traf_config_setup
----------------------------------------------------------------------
diff --git a/install/installer/traf_config_setup 
b/install/installer/traf_config_setup
index c1a993a..bcd859d 100755
--- a/install/installer/traf_config_setup
+++ b/install/installer/traf_config_setup
@@ -476,16 +476,16 @@ if [[ "$HADOOP_TYPE" == "apache" ]]; then
    
    #Zookeeper path
    echo -n "Enter Zookeeper installed full path (example: '/opt/zoo'), default 
is [$ZOO_HOME]: "
-   read answer0
+   read answer
 
-   if [ -z  $answer0 ]; then
+   if [ -z  $answer ]; then
       if [[ -z $ZOO_HOME ]]; then
         echo "***ERROR: Must enter Zookeeper installed path"
          exit -1
       fi
    else
-      if [[ -e $answer0 ]]; then
-         ZOO_HOME=$answer0
+      if [[ -e $answer ]]; then
+         ZOO_HOME=$answer
       else
         echo "***ERROR: Zookeeper installed path doesn't exist"
          exit -1

http://git-wip-us.apache.org/repos/asf/incubator-trafodion/blob/68eb74de/install/installer/traf_hortonworks_mods
----------------------------------------------------------------------
diff --git a/install/installer/traf_hortonworks_mods 
b/install/installer/traf_hortonworks_mods
new file mode 100755
index 0000000..b1b49d1
--- /dev/null
+++ b/install/installer/traf_hortonworks_mods
@@ -0,0 +1,572 @@
+#!/bin/bash
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+#
+# This script will configure HBase with HBase-trx
+# and co-processors needed for Trafodion.  It uses
+# Ambari's configs.sh script to do this.
+#
+# NOTE: Only for Ambari installations
+
+TRAF_CONFIG=/etc/trafodion/trafodion_config
+source $TRAF_CONFIG
+
+export PDSH="pdsh -R exec"
+export PDSH_SSH_CMD="ssh -q -n %h"
+export PDCP="pdcp -R ssh"
+
+export PDSH_HADOOP_NODES="$PDSH $MY_HBASE_NODES $PDSH_SSH_CMD"
+export PDCP_HADOOP_NODES="$PDCP $MY_HBASE_NODES"
+#=====================================
+# copy Trafodion hbase trx jar to /usr/lib/hbase/lib
+
+cd $UNTAR_DIR
+
+PORT=`echo $URL | sed 's/.*://'`
+AMBARI_HOST=$(echo $URL | sed 's@.*://@@' | sed 's@:.*@@')
+HDFS_NODE=$(echo $HDFS_NODES | head -n1 | awk '{print $1;}')
+HBASE_NODE=$(echo $HBASE_NODES | head -n1 | awk '{print $1;}')
+echo "export AMBARI_HOST=\"$AMBARI_HOST\"" >> $TRAF_CONFIG
+echo "export HDFS_NODE=\"$HDFS_NODE\"" >> $TRAF_CONFIG
+echo "export HBASE_NODE=\"$HBASE_NODE\"" >> $TRAF_CONFIG
+sudo chmod 777 $TRAF_CONFIG
+source $TRAF_CONFIG
+
+#determine java version and choose corresponding jar files
+if [[ $CDH_5_3_HDP_2_2_SUPPORT == "N" ]]; then
+   if [[ $HDP_2_3_SUPPORT == "Y" ]]; then
+      hbase_trx_jar="hbase-trx-hdp2_3-*.jar"
+   else
+      hbase_trx_jar="hbase-trx-hdp2_1-*.jar"
+   fi
+else
+   hbase_trx_jar="hbase-trx-hdp2_2-*.jar"
+fi
+
+traf_util_jar="trafodion-utility-*.jar"
+
+
+# The permissions the Trafodion build process creates on the hbase-trx jar
+# files does not work well with the installation process so we change them
+sudo chmod -R 777 $UNTAR_DIR/export/lib
+
+if [ ! -f $UNTAR_DIR/export/lib/$hbase_trx_jar ]; then
+    echo "***ERROR: unable to find $UNTAR_DIR/export/lib/$hbase_trx_jar"
+    exit -1
+fi
+
+# if more than one node then copy to all nodes
+echo "***INFO: copying $hbase_trx_jar to all nodes"
+if [ $node_count -ne 1 ]; then
+    $PDSH_HADOOP_NODES sudo rm -rf  $HADOOP_PATH/hbase-trx* 2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf /usr/lib/hbase/lib/hbase-trx* 2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf 
/usr/hdp/current/hbase-regionserver/lib/hbase-trx* 2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf /usr/share/cmf/lib/plugins/hbase-trx* 
2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf /usr/lib/hbase/lib/trafodion* 2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf 
/usr/hdp/current/hbase-regionserver/lib/trafodion* 2>/dev/null
+    $PDSH_HADOOP_NODES sudo rm -rf /usr/share/cmf/lib/plugins/trafodion* 
2>/dev/null
+    $TRAF_PDSH mkdir -p $LOCAL_WORKDIR 2>/dev/null
+    $PDSH_HADOOP_NODES mkdir -p $LOCAL_WORKDIR 2>/dev/null
+    cp $UNTAR_DIR/export/lib/$hbase_trx_jar $LOCAL_WORKDIR
+    cp $UNTAR_DIR/export/lib/$traf_util_jar $LOCAL_WORKDIR
+    $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$hbase_trx_jar $LOCAL_WORKDIR
+    $PDCP_HADOOP_NODES $LOCAL_WORKDIR/$traf_util_jar $LOCAL_WORKDIR
+    $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$traf_util_jar $HADOOP_PATH
+    $PDSH_HADOOP_NODES sudo cp $LOCAL_WORKDIR/$hbase_trx_jar $HADOOP_PATH
+    $PDSH_HADOOP_NODES sudo chmod 644 $HADOOP_PATH/$hbase_trx_jar
+    $PDSH_HADOOP_NODES sudo chmod 644 $HADOOP_PATH/$traf_util_jar
+
+    $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$hbase_trx_jar 2>/dev/null
+    $PDSH_HADOOP_NODES rm $LOCAL_WORKDIR/$traf_util_jar 2>/dev/null
+else
+    for node in $HBASE_NODES
+    do 
+    ssh -q -n $node sudo rm -rf $HADOOP_PATH/hbase-trx* 2>/dev/null
+    ssh -q -n $node sudo rm -rf /usr/lib/hbase/lib/hbase-trx* 2>/dev/null
+    ssh -q -n $node sudo rm -rf /usr/share/cmf/lib/plugins/hbase-trx* 
2>/dev/null
+    ssh -q -n $node sudo rm -rf /usr/lib/hbase/lib/trafodion* 2>/dev/null
+    ssh -q -n $node sudo rm -rf /usr/share/cmf/lib/plugins/trafodion* 
2>/dev/null
+    ssh -q -n $node sudo mkdir -p $TRAF_WORKDIR 2>/dev/null
+    ssh -q -n $node sudo chmod 777 $TRAF_WORKDIR
+    scp -q $UNTAR_DIR/export/lib/$hbase_trx_jar $(whoami)@$node:$TRAF_WORKDIR
+    scp -q $UNTAR_DIR/export/lib/$traf_util_jar $(whoami)@$node:$TRAF_WORKDIR
+    ssh -q -n $node sudo cp $TRAF_WORKDIR/$hbase_trx_jar $HADOOP_PATH
+    ssh -q -n $node sudo cp $TRAF_WORKDIR/$traf_util_jar $HADOOP_PATH
+    ssh -q -n $node sudo chmod 644 $HADOOP_PATH/$hbase_trx_jar
+    ssh -q -n $node sudo chmod 644 $HADOOP_PATH/$traf_util_jar
+    done
+fi
+
+#=======================================
+#Check that HBase-trx copied to all nodes
+
+for node in $HBASE_NODES
+do
+   copiedOver=$(ssh -q -n $node sudo ls $HADOOP_PATH/hbase-trx* | wc -l)
+   if [[ $copiedOver -ne "1" ]]; then
+      echo "***ERROR: $hbase_trx_jar was not copied on $node"
+      echo "***ERROR: Please investigate why this happened"
+      echo "***ERROR: Trafodion can not start without this. EXITING..."
+      exit -1
+   fi
+done
+
+echo "***INFO: $hbase_trx_jar copied correctly! Huzzah."
+
+
+
+#Copy hbase-site.xml file
+ssh -q -n $HBASE_NODE sudo cp /etc/hbase/conf/hbase-site.xml $HOME
+ssh -q -n $HBASE_NODE sudo chown $(whoami).$(whoami) $HOME/hbase-site.xml
+ssh -q -n $HBASE_NODE sudo chmod 777 $HOME/hbase-site.xml
+
+scp -q $(whoami)@$HBASE_NODE:$HOME/hbase-site.xml $HOME
+if [[ $? -gt 1 ]]; then
+   echo "***ERROR: Unable to find /etc/hbase/conf/hbase-site.xml file on 
$HBASE_NODE or unable to copy."
+   exit -1
+fi
+sudo cp $HOME/hbase-site.xml $TRAF_WORKDIR
+sudo chown trafodion.trafodion $TRAF_WORKDIR/hbase-site.xml
+
+#=====================================
+# create new directories for bulkload and lobs if not already there
+rm $LOCAL_WORKDIR/traf_temp_output 2>/dev/null
+
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir -p 
/hbase-staging"'
+if [ $? != 0 ]; then
+   echo "***ERROR: 'hadoop fs -mkdir -p /hbase-staging' command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' 
"$HBASE_USER"':'"$HBASE_GROUP" '/hbase-staging"'
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir -p 
/bulkload"'
+if [ $? != 0 ]; then
+   echo "***ERROR: 'hadoop fs -mkdir -p /bulkload' command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' 
"$TRAF_USER"':trafodion /bulkload"'
+
+# Create lobs directory
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hadoop fs -mkdir -p /lobs"'
+if [ $? != 0 ]; then
+   echo "***ERROR: 'hadoop fs -mkdir -p /lobs' command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hadoop fs -chown -R' 
"$TRAF_USER"':trafodion /lobs"'
+
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -mkdir -p 
/trafodion_backups" 2> $HOME/traf_temp_output'
+if [ $? != 0 ]; then
+   echo "***ERROR: 'hadoop fs -mkdir -p /trafodion_backups' command failed"
+   exit -1
+fi
+ssh -q -n $HDFS_NODE 'sudo su' "$HDFS_USER" '--command "hadoop fs -chown -R' 
"$TRAF_USER"':trafodion /trafodion_backups"'
+
+
+ssh -q -n $HDFS_NODE 'rm -rf $HOME/traf_temp_output'
+#=====================================
+# change the hbase configuration using Ambari's script
+
+AMBARI_DIR=/var/lib/ambari-server/resources/scripts
+cd $LOCAL_WORKDIR
+
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
 '-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.master.distributed.log.splitting false'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.master.distributed.log.splitting 
through Ambari's configs.sh script."
+    echo "***ERROR: Check if Ambari URL is correct, may need to enter external 
IP address."
+    echo "***ERROR: Check if iptables/firewall is configured correctly and 
ports a
+    re enabled."
+    echo "***ERROR: Check that HBase is running without error."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.coprocessor.region.classes 
"org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionObserver,org.apache.hadoop.hbase.coprocessor.transactional.TrxRegionEndpoint,org.apache.hadoop.hbase.coprocessor.AggregateImplementation"'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.coprocessor.region.classes through 
Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.hregion.impl 
org.apache.hadoop.hbase.regionserver.transactional.TransactionalRegion'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.hregion.impl through Ambari's 
configs.sh script."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.regionserver.region.split.policy 
org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.regionserver.region.split.policy 
through Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2 
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh  -u' "$ADMIN" '-p' 
"$PASSWORD" '-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.snapshot.enabled true'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.snapshot.enabled through Ambari's 
configs.sh script."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.bulkload.staging.dir /hbase-staging'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.bulkload.staging.dir through 
Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.regionserver.region.transactional.tlog true'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify 
hbase.regionserver.region.transactional.tlog through Ambari's configs.sh 
script."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.snapshot.master.timeoutMillis 600000'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.snapshot.master.timeoutMillis 
through Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.snapshot.region.timeout 600000'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.snapshot.region.timeout through 
Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hdfs-site 
dfs.namenode.acls.enabled true'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify dfs.namenode.acls.enabled through 
Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'hbase-site 
hbase.client.scanner.timeout.period 600000'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify hbase.client.scanner.timeout.period 
through Ambari's configs.sh script."
+    exit -1
+fi
+sleep 2
+echo
+
+#Change Zookeeper settings
+
+ssh -q -n $AMBARI_HOST "$AMBARI_DIR"'/configs.sh -u' "$ADMIN" '-p' "$PASSWORD" 
'-port' "$PORT" 'set' "$AMBARI_HOST" "$CLUSTER_NAME" 'zoo.cfg maxClientCnxns 0'
+if [ $? != 0 ]; then
+    echo "***ERROR: unable to modify maxClientCnxns through Ambari's 
configs.sh script."
+    exit -1
+fi
+sleep 2
+echo
+
+
+#=====================================
+# stop HBase to restart HDFS and pick up all the changes just made
+
+poll_time=30
+echo "***INFO: Restarting HBase to pick up config changes for Trafodion"
+echo "***INFO: Stopping HBase..."
+curl -k -u $ADMIN:$PASSWORD \
+    -H "X-Requested-By: Trafodion" \
+    -X PUT -d '{"ServiceInfo": { "state" : "INSTALLED" }}' \
+    $URL/api/v1/clusters/$CLUSTER_NAME/services/HBASE > 
$TRAF_WORKDIR/traf_hbase_restart_temp
+
+if [ $? != 0 ]; then 
+   echo "***ERROR: Unable to stop HBase"
+   echo "***ERROR: Please manually restart HBase through the Ambari web GUI"
+fi 
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $TRAF_WORKDIR/traf_hbase_restart_temp | grep Error | 
wc -l)
+
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to stop HBase"
+    echo "***ERROR: Please manually restart HBase through the Ambari web GUI"
+fi
+
+echo "***INFO: ...polling every $poll_time seconds until HBase stop is 
completed."
+command_id=$(cat $TRAF_WORKDIR/traf_hbase_restart_temp | grep id | awk '{print 
$3}' | sed -e 's@,@@' )
+echo "***DEBUG: Ambari command_id=$command_id"
+
+# poll until stop is completed as a stop can take a while
+completed=0
+while [ $completed -eq 0 ]; do
+    sleep $poll_time
+    curl -k -u $ADMIN:$PASSWORD \
+        $URL/api/v1/clusters/$CLUSTER_NAME/requests/$command_id \
+        > $LOCAL_WORKDIR/hbase_restart_status_temp
+    cat $LOCAL_WORKDIR/hbase_restart_status_temp
+    echo "***INFO: ...polling every $poll_time seconds until HBase stop is 
completed."
+    # if stop command is completed then completed will not equal 0
+    completed=$(cat $LOCAL_WORKDIR/hbase_restart_status_temp | grep 
'"request_status" : "COMPLETED"' | wc -l)
+done
+
+echo "***INFO: HBase stop completed"
+
+#=====================================
+#Stop Zookeeper
+
+echo "***INFO: Stopping Zookeeper..."
+curl -k --user $ADMIN:$PASSWORD \
+    -H "X-Requested-By: Trafodion" \
+    -X PUT -d '{"ServiceInfo": { "state" : "INSTALLED" }}' \
+    $URL/api/v1/clusters/$CLUSTER_NAME/services/ZOOKEEPER > 
$TRAF_WORKDIR/traf_zoo_restart_temp
+
+if [ $? != 0 ]; then
+   echo "***ERROR: Unable to restart Zookeeper"
+   echo "***ERROR: Please manually restart Zookeeper through the Ambari web 
GUI"
+fi
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $TRAF_WORKDIR/traf_zoo_restart_temp | grep Error | wc 
-l)
+
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to restart Zookeeper"
+    echo "***ERROR: Please manually restart Zookeeper through the Ambari web 
GUI"
+fi
+
+echo "***INFO: ...polling every $poll_time seconds until Zookeeper stop is 
completed."
+command_id=$(cat $TRAF_WORKDIR/traf_zoo_restart_temp | grep id | awk '{print 
$3}' | sed -e 's@,@@' )
+echo "***DEBUG: Ambari command_id=$command_id"
+
+# poll until stop is completed as a stop can take a while
+completed=0
+while [ $completed -eq 0 ]; do
+    sleep $poll_time
+    curl -k --user $ADMIN:$PASSWORD \
+        $URL/api/v1/clusters/$CLUSTER_NAME/requests/$command_id \
+        > $LOCAL_WORKDIR/traf_zoo_restart_temp
+    cat $LOCAL_WORKDIR/traf_zoo_restart_temp
+    echo "***INFO: ...polling every $poll_time seconds until Zookeeper stop is 
completed."
+    # if stop command is completed then completed will not equal 0
+    completed=$(cat $LOCAL_WORKDIR/traf_zoo_restart_temp | grep 
'"request_status" : "COMPLETED"' | wc -l)
+done
+
+#=====================================
+# restart HDFS to pick up all the changes just made
+
+echo "***INFO: Restarting HDFS to pick up config changes for Trafodion"
+echo "***INFO: Stopping HDFS..."
+curl -k --user $ADMIN:$PASSWORD \
+    -H "X-Requested-By: Trafodion" \
+    -X PUT -d '{"ServiceInfo": { "state" : "INSTALLED" }}' \
+    $URL/api/v1/clusters/$CLUSTER_NAME/services/HDFS > 
$TRAF_WORKDIR/traf_hdfs_restart_temp
+
+if [ $? != 0 ]; then 
+   echo "***ERROR: Unable to restart HDFS"
+   echo "***ERROR: Please manually restart HDFS through the Ambari web GUI"
+fi 
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $TRAF_WORKDIR/traf_hdfs_restart_temp | grep Error | wc 
-l)
+
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to restart HDFS"
+    echo "***ERROR: Please manually restart HDFS through the Ambari web GUI"
+fi
+
+echo "***INFO: ...polling every $poll_time seconds until HDFS stop is 
completed."
+command_id=$(cat $TRAF_WORKDIR/traf_hdfs_restart_temp | grep id | awk '{print 
$3}' | sed -e 's@,@@' )
+echo "***DEBUG: Ambari command_id=$command_id"
+
+# poll until stop is completed as a stop can take a while
+completed=0
+while [ $completed -eq 0 ]; do
+    sleep $poll_time
+    curl -k --user $ADMIN:$PASSWORD \
+        $URL/api/v1/clusters/$CLUSTER_NAME/requests/$command_id \
+        > $LOCAL_WORKDIR/traf_hdfs_restart_temp
+    cat $LOCAL_WORKDIR/traf_hdfs_restart_temp
+    echo "***INFO: ...polling every $poll_time seconds until HDFS stop is 
completed."
+    # if stop command is completed then completed will not equal 0
+    completed=$(cat $LOCAL_WORKDIR/traf_hdfs_restart_temp | grep 
'"request_status" : "COMPLETED"' | wc -l)
+done
+
+echo "***INFO: Starting HDFS..."
+curl -k --user $ADMIN:$PASSWORD \
+    -H "X-Requested-By: Trafodion" \
+    -X PUT -d '{"ServiceInfo": { "state" : "STARTED" }}' \
+    $URL/api/v1/clusters/$CLUSTER_NAME/services/HDFS > 
$TRAF_WORKDIR/traf_hdfs_restart_temp
+
+if [ $? != 0 ]; then
+   echo "***ERROR: Unable to restart HDFS"
+   echo "***ERROR: Please manually restart HDFS through the Ambari web GUI"
+fi
+
+# in most cases curl does not return an error
+# so curl's actual output needs to be checked, too
+curl_error=$(grep TITLE $TRAF_WORKDIR/traf_hdfs_restart_temp | grep Error | wc 
-l)
+if [ $curl_error -ne 0 ]; then
+    echo "***ERROR: Unable to restart HDFS"
+    echo "***ERROR: Please manually restart HDFS through the Ambari web GUI"
+fi
+
+echo "***INFO: ...polling every $poll_time seconds until HDFS start is 
completed."
+command_id=$(cat $TRAF_WORKDIR/traf_hdfs_restart_temp | grep id | awk '{print 
$3}' | sed -e 's@,@@' )
+echo "***DEBUG: Ambari command_id=$command_id"
+
+# poll until start is completed as a start can take a while
+completed=0
+while [ $completed -eq 0 ]; do
+    sleep $poll_time
+    curl -k --user $ADMIN:$PASSWORD \
+        $URL/api/v1/clusters/$CLUSTER_NAME/requests/$command_id \
+        > $LOCAL_WORKDIR/traf_hdfs_restart_temp
+    cat $LOCAL_WORKDIR/traf_hdfs_restart_temp
+    echo "***INFO: ...polling every $poll_time seconds until HDFS start is 
completed."
+    # if start command is completed then completed will not equal 0
+    completed=$(cat $LOCAL_WORKDIR/traf_hdfs_restart_temp | grep 
'"request_status" : "COMPLETED"' | wc -l)
+done
+
+echo "***INFO: HDFS restart completed"
+
+# wait to make sure HDFS is fully restarted and out of safemode
+echo "***INFO: waiting for HDFS to exit safemode"
+sudo su hdfs --command "hdfs dfsadmin -safemode wait"
+
#=====================================
# Start Zookeeper to pick up all the changes just made

echo "***INFO: Starting Zookeeper..."
# Ask Ambari to start the ZOOKEEPER service; capture the JSON response
# (contains the async request id) for the polling loop below.
curl -k --user $ADMIN:$PASSWORD \
    -H "X-Requested-By: Trafodion" \
    -X PUT -d '{"ServiceInfo": { "state" : "STARTED" }}' \
    $URL/api/v1/clusters/$CLUSTER_NAME/services/ZOOKEEPER > $TRAF_WORKDIR/traf_zoo_restart_temp

if [ $? != 0 ]; then
   echo "***ERROR: Unable to restart Zookeeper"
   echo "***ERROR: Please manually restart Zookeeper through the Ambari web GUI"
   # BUGFIX: stop here -- without a valid request id the polling loop below
   # would never see COMPLETED and would spin forever
   exit 1
fi

# in most cases curl does not return an error
# so curl's actual output needs to be checked, too
# (an Ambari HTML error page carries "Error" in its TITLE tag)
curl_error=$(grep TITLE $TRAF_WORKDIR/traf_zoo_restart_temp | grep -c Error)
if [ $curl_error -ne 0 ]; then
    echo "***ERROR: Unable to restart Zookeeper"
    echo "***ERROR: Please manually restart Zookeeper through the Ambari web GUI"
    # BUGFIX: same as above -- no usable request id, so do not poll
    exit 1
fi

echo "***INFO: ...polling every $poll_time seconds until Zookeeper start is completed."
# extract the numeric Ambari request id from the JSON response
command_id=$(cat $TRAF_WORKDIR/traf_zoo_restart_temp | grep id | awk '{print $3}' | sed -e 's@,@@' )
echo "***DEBUG: Ambari command_id=$command_id"

# poll until start is completed as a start can take a while
completed=0
while [ $completed -eq 0 ]; do
    sleep $poll_time
    curl -k --user $ADMIN:$PASSWORD \
        $URL/api/v1/clusters/$CLUSTER_NAME/requests/$command_id \
        > $LOCAL_WORKDIR/traf_zoo_restart_temp
    cat $LOCAL_WORKDIR/traf_zoo_restart_temp
    echo "***INFO: ...polling every $poll_time seconds until Zookeeper start is completed."
    # if start command is completed then completed will not equal 0
    completed=$(grep -c '"request_status" : "COMPLETED"' $LOCAL_WORKDIR/traf_zoo_restart_temp)
done

echo "***INFO: Zookeeper start completed"
+
#=====================================
# restart HBase to pick up all the changes just made

echo "***INFO: Restarting HBase to pick up config changes for Trafodion"
echo "***INFO: Starting HBase..."
# --user used instead of -u for consistency with the HDFS/Zookeeper
# sections above (the two forms are equivalent in curl)
curl -k --user $ADMIN:$PASSWORD \
    -H "X-Requested-By: Trafodion" \
    -X PUT -d '{"ServiceInfo": { "state" : "STARTED" }}' \
    $URL/api/v1/clusters/$CLUSTER_NAME/services/HBASE > $TRAF_WORKDIR/traf_hbase_restart_temp

if [ $? != 0 ]; then
   echo "***ERROR: Unable to restart HBase"
   echo "***ERROR: Please manually restart HBase through the Ambari web GUI"
   # BUGFIX: stop here -- without a valid request id the polling loop below
   # would never see COMPLETED and would spin forever
   exit 1
fi

# in most cases curl does not return an error
# so curl's actual output needs to be checked, too
# (an Ambari HTML error page carries "Error" in its TITLE tag)
curl_error=$(grep TITLE $TRAF_WORKDIR/traf_hbase_restart_temp | grep -c Error)
if [ $curl_error -ne 0 ]; then
    echo "***ERROR: Unable to restart HBase"
    echo "***ERROR: Please manually restart HBase through the Ambari web GUI"
    # BUGFIX: same as above -- no usable request id, so do not poll
    exit 1
fi

echo "***INFO: ...polling every $poll_time seconds until HBase start is completed."
# extract the numeric Ambari request id from the JSON response
command_id=$(cat $TRAF_WORKDIR/traf_hbase_restart_temp | grep id | awk '{print $3}' | sed -e 's@,@@' )
echo "***DEBUG: Ambari command_id=$command_id"

# poll until start is completed as a start can take a while
completed=0
while [ $completed -eq 0 ]; do
    sleep $poll_time
    curl -k --user $ADMIN:$PASSWORD \
        $URL/api/v1/clusters/$CLUSTER_NAME/requests/$command_id \
        > $LOCAL_WORKDIR/hbase_restart_status_temp
    cat $LOCAL_WORKDIR/hbase_restart_status_temp
    echo "***INFO: ...polling every $poll_time seconds until HBase start is completed."
    # if start command is completed then completed will not equal 0
    completed=$(grep -c '"request_status" : "COMPLETED"' $LOCAL_WORKDIR/hbase_restart_status_temp)
done

echo "***INFO: HBase restart completed"
+
#=====================================
# NOTE: These commands must be done AFTER acls are
#       enabled and HDFS has been restarted
# Create the HBase archive directory and grant the Trafodion user
# read/write/execute via HDFS ACLs so snapshot scans can read archived
# HFiles.
echo "***INFO: Setting HDFS ACLs for snapshot scan support"
ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -mkdir -p /apps/hbase/data/archive"'
if [ $? != 0 ]; then
   echo "***ERROR: (hdfs dfs -mkdir -p /apps/hbase/data/archive) command failed"
   exit 1
fi
ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive"'
if [ $? != 0 ]; then
   echo "***ERROR: (hdfs dfs -chown hbase:hdfs /apps/hbase/data/archive) command failed"
   exit 1
fi
ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -setfacl -R -m user:'"$TRAF_USER"':rwx /apps/hbase/data/archive"'
if [ $? != 0 ]; then
   # BUGFIX: message previously reported the wrong command (mask::rwx)
   echo "***ERROR: (hdfs dfs -setfacl -R -m user:$TRAF_USER:rwx /apps/hbase/data/archive) command failed"
   exit 1
fi
ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -setfacl -R -m default:user:'"$TRAF_USER"':rwx /apps/hbase/data/archive"'
if [ $? != 0 ]; then
   # BUGFIX: message previously reported the wrong command (mask::rwx)
   echo "***ERROR: (hdfs dfs -setfacl -R -m default:user:$TRAF_USER:rwx /apps/hbase/data/archive) command failed"
   exit 1
fi
ssh -q -n $HDFS_NODE 'sudo su hdfs --command "hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive"'
if [ $? != 0 ]; then
   echo "***ERROR: (hdfs dfs -setfacl -R -m mask::rwx /apps/hbase/data/archive) command failed"
   exit 1
fi
+
# clean up files generated by Ambari's config.sh script
ssh -q -n $AMBARI_HOST 'rm $HOME/doSet_version*'

# Record that the cluster mods completed by rewriting the MODS_COMPLETE
# line in the Trafodion config file (delete any old line, append fresh).
MODS_COMPLETE="Y"
sudo chmod 777 $TRAF_CONFIG
sed -i '/MODS_COMPLETE\=/d' $TRAF_CONFIG
echo "export MODS_COMPLETE=\"$MODS_COMPLETE\"" >> $TRAF_CONFIG
sudo chmod 777 $TRAF_CONFIG
source $TRAF_CONFIG

# NOTE(review): TRAF_CONFIG is reassigned to a bare filename here, so the
# cp below resolves it relative to the current working directory -- confirm
# this is the intended path and not the full /etc/trafodion path used above.
TRAF_CONFIG="trafodion_config"
TRAF_CONFIG_DIR="/etc/trafodion"

# On multi-node clusters, push the config file to every node and install
# it into /etc/trafodion via the pdsh/pdcp wrappers.
if [ $node_count -ne 1 ]; then
   cp $TRAF_CONFIG $LOCAL_WORKDIR
   # NOTE(review): $TRAF_CONFIG_FILE is not set in this section; presumably
   # exported by the sourced trafodion_config -- verify it matches
   # $TRAF_CONFIG, otherwise the pdcp copies a different file.
   $TRAF_PDCP $LOCAL_WORKDIR/$TRAF_CONFIG_FILE $HOME
   $TRAF_PDSH sudo mkdir -p $TRAF_CONFIG_DIR
   $TRAF_PDSH sudo cp $HOME/$TRAF_CONFIG_FILE $TRAF_CONFIG_DIR
   # NOTE(review): chmod targets the relative $TRAF_CONFIG name on the
   # remote nodes, not $TRAF_CONFIG_DIR/$TRAF_CONFIG_FILE -- confirm.
   $TRAF_PDSH sudo chmod 777 $TRAF_CONFIG
fi
+

Reply via email to