HADOOP-12933. bin/hdfs work for dynamic subcommands

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7020c503
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7020c503
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7020c503

Branch: refs/heads/HADOOP-12930
Commit: 7020c503d69f3f2538f25620cf116812b792a5fb
Parents: ff0d5fa
Author: Allen Wittenauer <a...@apache.org>
Authored: Tue May 3 12:45:21 2016 -0700
Committer: Allen Wittenauer <a...@apache.org>
Committed: Wed May 4 20:43:21 2016 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/bin/hdfs               | 402 ++++++++++---------
 1 file changed, 212 insertions(+), 190 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7020c503/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index c365250..310fb41 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -16,7 +16,12 @@
 # limitations under the License.
 
 MYNAME="${BASH_SOURCE-$0}"
+HADOOP_SHELL_EXECNAME="${MYNAME##*/}"
 
+## @description  build up the hdfs command's usage text.
+## @audience     public
+## @stability    stable
+## @replaceable  no
 function hadoop_usage
 {
   hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
@@ -56,7 +61,194 @@ function hadoop_usage
   hadoop_add_subcommand "storagepolicies" "list/get/set block storage policies"
   hadoop_add_subcommand "version" "print the version"
   hadoop_add_subcommand "zkfc" "run the ZK Failover Controller daemon"
-  hadoop_generate_usage "${MYNAME}" false
+  hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
+}
+
+## @description  Default command handler for the hdfs command
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        CLI arguments
+function hdfscmd_case
+{
+  subcmd=$1
+  shift
+
+  case ${subcmd} in
+    balancer)
+      supportdaemonization="true"
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.balancer.Balancer
+      hadoop_debug "Appending HADOOP_BALANCER_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}"
+    ;;
+    cacheadmin)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin
+    ;;
+    classpath)
+      hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
+    ;;
+    crypto)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CryptoAdmin
+    ;;
+    datanode)
+      supportdaemonization="true"
+      # Determine if we're starting a secure datanode, and
+      # if so, redefine appropriate variables
+      if [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
+        secure_service="true"
+        secure_user="${HADOOP_SECURE_DN_USER}"
+
+        # backward compatibility
+        HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
+        HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"
+
+        hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
+        hadoop_debug "Appending HADOOP_DN_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
+        HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS}"
+        HADOOP_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
+      else
+        hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
+        HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}"
+        HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
+      fi
+    ;;
+    debug)
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DebugAdmin'
+    ;;
+    dfs)
+      HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell
+      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+    ;;
+    dfsadmin)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin
+      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+    ;;
+    envvars)
+      echo "JAVA_HOME='${JAVA_HOME}'"
+      echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"
+      echo "HDFS_DIR='${HDFS_DIR}'"
+      echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'"
+      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
+      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
+      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
+      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
+      exit 0
+    ;;
+    erasurecode)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
+      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+    ;;
+    fetchdt)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+    ;;
+    fsck)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
+      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+    ;;
+    getconf)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetConf
+    ;;
+    groups)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetGroups
+    ;;
+    haadmin)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSHAAdmin
+      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+    ;;
+    journalnode)
+      supportdaemonization="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
+      hadoop_debug "Appending HADOOP_JOURNALNODE_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}"
+    ;;
+    jmxget)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.JMXGet
+    ;;
+    lsSnapshottableDir)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+    ;;
+    mover)
+      supportdaemonization="true"
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.mover.Mover
+      hadoop_debug "Appending HADOOP_MOVER_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
+    ;;
+    namenode)
+      supportdaemonization="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.NameNode'
+      hadoop_debug "Appending HADOOP_NAMENODE_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}"
+      hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
+    ;;
+    nfs3)
+      supportdaemonization="true"
+      if [[ -n "${HADOOP_PRIVILEGED_NFS_USER}" ]]; then
+        secure_service="true"
+        secure_user="${HADOOP_PRIVILEGED_NFS_USER}"
+
+        # backward compatibility
+        HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
+        HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"
+
+        hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
+        hadoop_debug "Appending HADOOP_NFS3_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
+        HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS}"
+        HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
+      else
+        hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
+        HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}"
+        HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
+      fi
+    ;;
+    oev)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
+    ;;
+    oiv)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
+    ;;
+    oiv_legacy)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+    ;;
+    portmap)
+      supportdaemonization="true"
+      HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
+      hadoop_debug "Appending HADOOP_PORTMAP_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}"
+    ;;
+    secondarynamenode)
+      supportdaemonization="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+      hadoop_debug "Appending HADOOP_SECONDARYNAMENODE_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
+      hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
+    ;;
+    snapshotDiff)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+    ;;
+    storagepolicies)
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
+    ;;
+    version)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
+    ;;
+    zkfc)
+      supportdaemonization="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
+      hadoop_debug "Appending HADOOP_ZKFC_OPTS onto HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}"
+    ;;
+    *)
+      HADOOP_CLASSNAME="${subcmd}"
+      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+  esac
 }
 
 # let's locate libexec...
@@ -81,186 +273,16 @@ if [[ $# = 0 ]]; then
   hadoop_exit_with_usage 1
 fi
 
-COMMAND=$1
+HADOOP_SUBCMD=$1
 shift
 
-case ${COMMAND} in
-  balancer)
-    supportdaemonization="true"
-    CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
-    hadoop_debug "Appending HADOOP_BALANCER_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}"
-  ;;
-  cacheadmin)
-    CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
-  ;;
-  classpath)
-    hadoop_do_classpath_subcommand CLASS "$@"
-  ;;
-  crypto)
-    CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
-  ;;
-  datanode)
-    supportdaemonization="true"
-    # Determine if we're starting a secure datanode, and
-    # if so, redefine appropriate variables
-    if [[ -n "${HADOOP_SECURE_DN_USER}" ]]; then
-      secure_service="true"
-      secure_user="${HADOOP_SECURE_DN_USER}"
-
-      # backward compatibility
-      HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
-      HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"
-
-      hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
-      hadoop_debug "Appending HADOOP_DN_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS}"
-      CLASS="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
-    else
-      hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}"
-      CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
-    fi
-  ;;
-  debug)
-    CLASS='org.apache.hadoop.hdfs.tools.DebugAdmin'
-  ;;
-  dfs)
-    CLASS=org.apache.hadoop.fs.FsShell
-    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-  ;;
-  dfsadmin)
-    CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
-    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-  ;;
-  envvars)
-    echo "JAVA_HOME='${JAVA_HOME}'"
-    echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"
-    echo "HDFS_DIR='${HDFS_DIR}'"
-    echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'"
-    echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
-    echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
-    echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
-    echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
-    exit 0
-  ;;
-  erasurecode)
-    CLASS=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
-    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-  ;;
-  fetchdt)
-    CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
-  ;;
-  fsck)
-    CLASS=org.apache.hadoop.hdfs.tools.DFSck
-    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-  ;;
-  getconf)
-    CLASS=org.apache.hadoop.hdfs.tools.GetConf
-  ;;
-  groups)
-    CLASS=org.apache.hadoop.hdfs.tools.GetGroups
-  ;;
-  haadmin)
-    CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
-    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-  ;;
-  journalnode)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
-    hadoop_debug "Appending HADOOP_JOURNALNODE_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}"
-  ;;
-  jmxget)
-    CLASS=org.apache.hadoop.hdfs.tools.JMXGet
-  ;;
-  lsSnapshottableDir)
-    CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
-  ;;
-  mover)
-    supportdaemonization="true"
-    CLASS=org.apache.hadoop.hdfs.server.mover.Mover
-    hadoop_debug "Appending HADOOP_MOVER_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
-  ;;
-  namenode)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
-    hadoop_debug "Appending HADOOP_NAMENODE_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}"
-    hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
-  ;;
-  nfs3)
-    supportdaemonization="true"
-    if [[ -n "${HADOOP_PRIVILEGED_NFS_USER}" ]]; then
-      secure_service="true"
-      secure_user="${HADOOP_PRIVILEGED_NFS_USER}"
-
-      # backward compatibility
-      HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
-      HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"
-
-      hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
-      hadoop_debug "Appending HADOOP_NFS3_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS}"
-      CLASS=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
-    else
-      hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}"
-      CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
-    fi
-  ;;
-  oev)
-    CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
-  ;;
-  oiv)
-    CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
-  ;;
-  oiv_legacy)
-    CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
-  ;;
-  portmap)
-    supportdaemonization="true"
-    CLASS=org.apache.hadoop.portmap.Portmap
-    hadoop_debug "Appending HADOOP_PORTMAP_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}"
-  ;;
-  secondarynamenode)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
-    hadoop_debug "Appending HADOOP_SECONDARYNAMENODE_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
-    hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
-  ;;
-  snapshotDiff)
-    CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
-  ;;
-  storagepolicies)
-    CLASS=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
-  ;;
-  version)
-    CLASS=org.apache.hadoop.util.VersionInfo
-  ;;
-  zkfc)
-    supportdaemonization="true"
-    CLASS='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
-    hadoop_debug "Appending HADOOP_ZKFC_OPTS onto HADOOP_OPTS"
-    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}"
-  ;;
-  *)
-    CLASS="${COMMAND}"
-    if ! hadoop_validate_classname "${CLASS}"; then
-      hadoop_exit_with_usage 1
-    fi
-  ;;
-esac
+if declare -f hdfs_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
+  "hdfs_subcommand_${HADOOP_SUBCMD}" "$@"
+else
+  hdfscmd_case "${HADOOP_SUBCMD}" "$@"
+fi
 
-hadoop_verify_user "${COMMAND}"
+hadoop_verify_user "${HADOOP_SUBCMD}"
 
 if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
  hadoop_common_slave_mode_execute "${HADOOP_HDFS_HOME}/bin/hdfs" "${HADOOP_USER_PARAMS[@]}"
@@ -271,14 +293,14 @@ if [[ -n "${secure_service}" ]]; then
   HADOOP_SECURE_USER="${secure_user}"
   hadoop_verify_secure_prereq
   hadoop_setup_secure_service
-  priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
-  priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.err"
-  priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
-  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
-  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
+  priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
+  priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
+  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
 else
-  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out"
-  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid"
+  daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
+  daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
 fi
 
 if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
@@ -286,10 +308,10 @@ if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
   HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
   if [[ -n "${secure_service}" ]]; then
     # shellcheck disable=SC2034
-    HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
+    HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
   else
     # shellcheck disable=SC2034
-    HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.log"
+    HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
   fi
 fi
 
@@ -298,15 +320,15 @@ hadoop_finalize
 if [[ -n "${supportdaemonization}" ]]; then
   if [[ -n "${secure_service}" ]]; then
     hadoop_secure_daemon_handler \
-    "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\
+    "${HADOOP_DAEMON_MODE}" "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}"\
     "${daemon_pidfile}" "${daemon_outfile}" \
     "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}" "$@"
   else
-    hadoop_daemon_handler "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\
+    hadoop_daemon_handler "${HADOOP_DAEMON_MODE}" "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}"\
     "${daemon_pidfile}" "${daemon_outfile}" "$@"
   fi
   exit $?
 else
   # shellcheck disable=SC2086
-  hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
+  hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "$@"
 fi
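
For context, the net effect of this rewrite: the built-in handlers move into
hdfscmd_case, and the dispatch just above hadoop_verify_user first checks (via
declare -f) for a function named hdfs_subcommand_<subcommand>, running it in
place of hdfscmd_case when one exists. A minimal sketch of such a user-supplied
handler follows; the "hello" subcommand and the org.example.HelloTool class are
hypothetical, purely for illustration:

  ## Hypothetical dynamic subcommand: once a function like this has been
  ## declared (e.g. by a shell profile), "hdfs hello" is routed here
  ## instead of falling through to hdfscmd_case.
  function hdfs_subcommand_hello
  {
    # org.example.HelloTool is a placeholder, not a real Hadoop class;
    # HADOOP_CLASSNAME is the variable the script ultimately launches.
    HADOOP_CLASSNAME=org.example.HelloTool
    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
  }

Subcommands with no matching function still fall through to the *) arm of
hdfscmd_case, which treats the argument as a Java class name via
hadoop_validate_classname, so "hdfs fully.qualified.ClassName" keeps working.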

