Author: stack
Date: Thu May 30 17:16:01 2013
New Revision: 1487937

URL: http://svn.apache.org/r1487937
Log:
HBASE-7244 Provide a command or argument to startup, that formats znodes if provided; FORGOT TO SVN ADD bin/hbase-cleanup.sh

Added:
    hbase/branches/0.95/bin/hbase-cleanup.sh

Added: hbase/branches/0.95/bin/hbase-cleanup.sh
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/bin/hbase-cleanup.sh?rev=1487937&view=auto
==============================================================================
--- hbase/branches/0.95/bin/hbase-cleanup.sh (added)
+++ hbase/branches/0.95/bin/hbase-cleanup.sh Thu May 30 17:16:01 2013
@@ -0,0 +1,146 @@
+#!/usr/bin/env bash
+#
+#/**
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# Cleans HBase-related data from ZooKeeper and HDFS if no HBase process is alive.
+#
+# Environment Variables
+#
+#   HBASE_REGIONSERVERS    File naming remote hosts.
+#     Default is ${HADOOP_CONF_DIR}/regionservers
+#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_HOME}/conf.
+#   HBASE_CONF_DIR  Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
+#   HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+#   HADOOP_SLAVE_TIMEOUT Seconds to wait for timing out a remote command.
+#   HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
+#
+
+usage="Usage: hbase-cleanup.sh (--cleanZk|--cleanHdfs|--cleanAll)"
+
+bin=`dirname "$0"`
+bin=`cd "$bin">/dev/null; pwd`
+
+# This will set HBASE_HOME, etc.
+. "$bin"/hbase-config.sh
+
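+# Validate that exactly one of the supported cleanup options was supplied.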
+case $1 in
+  --cleanZk|--cleanHdfs|--cleanAll) 
+    matches="yes" ;;
+  *) ;;
+esac
+if [ $# -ne 1 -o "$matches" = "" ]; then
+  echo $usage
+  exit 1;
+fi
+
+format_option=$1;
+
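+# HBaseConfTool prints the configured value for a key, or "null" when it is unset.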
+distMode=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.cluster.distributed | head -n 1`
+
+if [ "$distMode" == 'false' ];then
+  echo "Skipping hbase data clearing in standalone mode." 2>&1
+  exit 1;
+fi
+
+zparent=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent`
+if [ "$zparent" == "null" ]; then zparent="/hbase"; fi
+
+hrootdir=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.rootdir`
+if [ "$hrootdir" == "null" ]; then hrootdir="file:///tmp/hbase-${USER}/hbase"; 
fi
+
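+# Checks that no znodes remain under the regionservers/backup-masters child of
+# the parent znode; exits without cleaning if any are still present.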
+check_for_znodes() {
+  command=$1;
+  case $command in
+    regionservers)
+      zchild=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.rs`
+      if [ "$zchild" == "null" ]; then zchild="rs"; fi
+      ;;
+    backupmasters)
+      zchild=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.backup.masters`
+      if [ "$zchild" == "null" ]; then zchild="backup-masters"; fi
+      ;;
+  esac
+  znodes=`"$bin"/hbase zkcli ls $zparent/$zchild 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//"`
+  if [ "$znodes" != "" ]; then
+    echo -n "ZNode(s) [${znodes}] of $command are not expired. Exiting without 
cleaning hbase data."
+    echo #force a newline      
+    exit 1;
+  else
+    echo -n "All ZNode(s) of $command are expired."
+  fi
+  echo #force a newline
+}
+
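+# Runs the given command through the HBase zkcli shell.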
+execute_zk_command() {
+  command=$1;
+  "$bin"/hbase zkcli $command 2>&1
+}
+
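+# Runs the given command through the Hadoop FsShell.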
+execute_hdfs_command() {
+  command=$1;
+  "$bin"/hbase org.apache.hadoop.fs.FsShell $command 2>&1
+}
+
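+# Deletes the parent znode and/or the HBase root directory according to the
+# selected option.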
+clean_up() {
+  case $1 in
+  --cleanZk) 
+    execute_zk_command "rmr ${zparent}";
+    ;;
+  --cleanHdfs)
+    execute_hdfs_command "-rmr ${hrootdir}"
+    ;;
+  --cleanAll)
+    execute_zk_command "rmr ${zparent}";
+    execute_hdfs_command "-rmr ${hrootdir}"
+    ;;
+  *)
+    ;;
+  esac 
+}
+
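+# Greps the zkcli stat output; succeeds when the znode does not exist (or
+# ZooKeeper is unreachable).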
+check_znode_exists() {
+  command=$1
+  "$bin"/hbase zkcli stat $command 2>&1 | grep "Node does not 
exist\|Connection refused"
+}
+
+check_znode_exists $zparent
+if [ $? -ne 0 ]; then
+  # make sure the online region server(s) znode(s) have been deleted before continuing
+  check_for_znodes regionservers
+  # make sure the backup master(s) znode(s) has been deleted before continuing
+  check_for_znodes backupmasters
+  # make sure the master znode has been deleted before continuing
+  zmaster=`$bin/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.master`
+  if [ "$zmaster" == "null" ]; then zmaster="master"; fi
+  zmaster=$zparent/$zmaster
+  check_znode_exists $zmaster
+  if [ $? -ne 0 ]; then
+    echo -n "Master ZNode is not expired. Exiting without cleaning hbase data."
+    echo #force a new line
+    exit 1
+  else
+    echo "Active Master ZNode also expired."
+  fi
+  echo #force a newline
+else
+  echo "HBase parent znode ${zparent} does not exist."
+fi
+
+# cleans zookeeper and/or hdfs data.
+clean_up $format_option
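
For reference, a minimal usage sketch (assuming all HBase processes are already stopped and the script is run from $HBASE_HOME; the option names come from the case statement above):

    # remove HBase znodes from ZooKeeper only
    ./bin/hbase-cleanup.sh --cleanZk

    # remove the hbase.rootdir contents from HDFS only
    ./bin/hbase-cleanup.sh --cleanHdfs

    # remove both
    ./bin/hbase-cleanup.sh --cleanAll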

