Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 b67a7eda9 -> 8658ed7dc


HDFS-13342. Ozone: Rename and fix ozone CLI scripts. Contributed by Shashikant 
Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8658ed7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8658ed7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8658ed7d

Branch: refs/heads/HDFS-7240
Commit: 8658ed7dccd2e53aed55f0158293886e7a8a45c8
Parents: b67a7ed
Author: Mukul Kumar Singh <msi...@apache.org>
Authored: Fri Apr 6 16:55:08 2018 +0530
Committer: Mukul Kumar Singh <msi...@apache.org>
Committed: Fri Apr 6 16:55:08 2018 +0530

----------------------------------------------------------------------
 .../src/main/compose/cblock/docker-compose.yaml |  18 +-
 .../src/main/compose/ozone/docker-compose.yaml  |  14 +-
 .../hdds/scm/StorageContainerManager.java       |   6 +-
 .../src/test/compose/docker-compose.yaml        |  14 +-
 .../test/robotframework/acceptance/ozone.robot  |  18 +-
 hadoop-ozone/common/src/main/bin/oz             | 202 -------------------
 hadoop-ozone/common/src/main/bin/ozone          | 202 +++++++++++++++++++
 hadoop-ozone/common/src/main/bin/start-ozone.sh |  14 +-
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |  14 +-
 .../apache/hadoop/ozone/freon/OzoneGetConf.java |   4 +-
 .../src/main/shellprofile.d/hadoop-ozone.sh     |   2 +-
 .../hadoop/ozone/ksm/KeySpaceManager.java       |   4 +-
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |  16 +-
 .../src/main/site/markdown/OzoneCommandShell.md |  34 ++--
 14 files changed, 281 insertions(+), 281 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-dist/src/main/compose/cblock/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/cblock/docker-compose.yaml 
b/hadoop-dist/src/main/compose/cblock/docker-compose.yaml
index b88514e..fa4d267 100644
--- a/hadoop-dist/src/main/compose/cblock/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/cblock/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       hostname: namenode
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,38 +29,38 @@ services:
          - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/oz","datanode"]
+      command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
          - ./docker-config
    jscsi:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       ports:
         - 3260:3260
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/oz","jscsi"]
+      command: ["/opt/hadoop/bin/ozone","jscsi"]
    cblock:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/oz","cblockserver"]
+      command: ["/opt/hadoop/bin/ozone","cblockserver"]
    scm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
          - 9876:9876
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/oz","scm"]
+      command: ["/opt/hadoop/bin/ozone","scm"]
       environment:
           ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml 
b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index f2b263c..13a7db6 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       hostname: namenode
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,16 +29,16 @@ services:
          - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/oz","datanode"]
+      command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
    ksm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
@@ -47,9 +47,9 @@ services:
          ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/oz","ksm"]
+      command: ["/opt/hadoop/bin/ozone","ksm"]
    scm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
@@ -58,4 +58,4 @@ services:
           - ./docker-config
       environment:
           ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/oz","scm"]
+      command: ["/opt/hadoop/bin/ozone","scm"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java
index 1a78dee..ce0d4f8 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java
@@ -246,12 +246,12 @@ public class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 
 
   private static final String USAGE =
-      "Usage: \n oz scm [genericOptions] "
+      "Usage: \n ozone scm [genericOptions] "
           + "[ " + StartupOption.INIT.getName() + " [ "
           + StartupOption.CLUSTERID.getName() + " <cid> ] ]\n "
-          + "oz scm [genericOptions] [ "
+          + "ozone scm [genericOptions] [ "
           + StartupOption.GENCLUSTERID.getName() + " ]\n " +
-          "oz scm [ "
+          "ozone scm [ "
           + StartupOption.HELP.getName() + " ]\n";
   /**
    * Creates a new StorageContainerManager.  Configuration will be updated with

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml 
b/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
index fb4e5d3..8350eae 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/compose/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       hostname: namenode
       volumes:
          - ${HADOOPDIR}:/opt/hadoop
@@ -29,16 +29,16 @@ services:
          - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
         - ${HADOOPDIR}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/oz","datanode"]
+      command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
    ksm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
          - ${HADOOPDIR}:/opt/hadoop
       ports:
@@ -47,9 +47,9 @@ services:
          ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/oz","ksm"]
+      command: ["/opt/hadoop/bin/ozone","ksm"]
    scm:
-      image: elek/hadoop-runner:o3-refactor
+      image: apache/hadoop-runner
       volumes:
          - ${HADOOPDIR}:/opt/hadoop
       ports:
@@ -58,4 +58,4 @@ services:
           - ./docker-config
       environment:
           ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/oz","scm"]
+      command: ["/opt/hadoop/bin/ozone","scm"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot 
b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
index 31ddf36..ea9131e 100644
--- 
a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
+++ 
b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone.robot
@@ -32,7 +32,7 @@ Daemons are running without error
     Is daemon running without error           datanode
 
 Check if datanode is connected to the scm
-    Wait Until Keyword Succeeds     2min    5sec    Have healthy datanodes   1
+    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes   1
 
 Scale it up to 5 datanodes
     Scale datanodes up  5
@@ -48,15 +48,15 @@ Test rest interface
     ${result} =     Execute on          datanode        curl -i -X DELETE 
${COMMON_RESTHEADER} "http://localhost:9880/volume1";
                     Should contain      ${result}       200 OK
 
-Test oz cli
-                    Execute on          datanode        oz oz -createVolume 
http://localhost:9880/hive -user bilbo -quota 100TB -root
-    ${result} =     Execute on          datanode        oz oz -listVolume 
http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | 
select(.volumeName=="hive")'
+Test ozone cli
+                    Execute on          datanode        ozone oz -createVolume 
http://localhost:9880/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on          datanode        ozone oz -listVolume 
http://localhost:9880/ -user bilbo | grep -v Removed | jq '.[] | 
select(.volumeName=="hive")'
                     Should contain      ${result}       createdOn
-                    Execute on          datanode        oz oz -createBucket 
http://localhost:9880/hive/bb1
-    ${result}       Execute on          datanode        oz oz -listBucket 
http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | 
select(.bucketName=="bb1") | .volumeName'
+                    Execute on          datanode        ozone oz -createBucket 
http://localhost:9880/hive/bb1
+    ${result}       Execute on          datanode        ozone oz -listBucket 
http://localhost:9880/hive/ | grep -v Removed | jq -r '.[] | 
select(.bucketName=="bb1") | .volumeName'
                     Should Be Equal     ${result}       hive
-                    Execute on          datanode        oz oz -deleteBucket 
http://localhost:9880/hive/bb1
-                    Execute on          datanode        oz oz -deleteVolume 
http://localhost:9880/hive -user bilbo
+                    Execute on          datanode        ozone oz -deleteBucket 
http://localhost:9880/hive/bb1
+                    Execute on          datanode        ozone oz -deleteVolume 
http://localhost:9880/hive -user bilbo
 
 
 
@@ -67,7 +67,7 @@ Check webui static resources
         Should contain         ${result}               200
 
 Start freon testing
-    ${result} =                Execute on              ksm             oz 
freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
+    ${result} =                Execute on              ksm             ozone 
freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
         Wait Until Keyword Succeeds    3min    10sec           Should contain  
        ${result}               Number of Keys added: 125
         Should Not Contain             ${result}               ERROR
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/common/src/main/bin/oz
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/oz 
b/hadoop-ozone/common/src/main/bin/oz
deleted file mode 100755
index 7841e7a..0000000
--- a/hadoop-ozone/common/src/main/bin/oz
+++ /dev/null
@@ -1,202 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The name of the script being executed.
-HADOOP_SHELL_EXECNAME="oz"
-MYNAME="${BASH_SOURCE-$0}"
-
-## @description  build up the hdfs command's usage text.
-## @audience     public
-## @stability    stable
-## @replaceable  no
-function hadoop_usage
-{
-  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
-  hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon"
-  hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker 
mode"
-  hadoop_add_option "--loglevel level" "set the log4j level for this command"
-  hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
-  hadoop_add_option "--workers" "turn on worker mode"
-
-
-  hadoop_add_subcommand "cblock" admin "cblock CLI"
-  hadoop_add_subcommand "cblockserver" daemon "run cblock server"
-  hadoop_add_subcommand "classpath" client "prints the class path needed to 
get the hadoop jar and the required libraries"
-  hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
-  hadoop_add_subcommand "envvars" client "display computed Hadoop environment 
variables"
-  hadoop_add_subcommand "freon" client "runs an ozone data generator"
-  hadoop_add_subcommand "genesis" client "runs a collection of ozone 
benchmarks to help with tuning."
-  hadoop_add_subcommand "getozoneconf" client "get ozone config values from
-  configuration"
-  hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode 
or DataNode."
-  hadoop_add_subcommand "jscsi" daemon "run cblock jscsi server"
-  hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
-  hadoop_add_subcommand "o3" client "command line interface for ozone"
-  hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata 
into relational data"
-  hadoop_add_subcommand "scm" daemon "run the Storage Container Manager 
service"
-  hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container 
Manager "
-  hadoop_add_subcommand "version" client "print the version"
-
-  hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
-}
-
-## @description  Default command handler for hadoop command
-## @audience     public
-## @stability    stable
-## @replaceable  no
-## @param        CLI arguments
-function ozonecmd_case
-{
-  subcmd=$1
-  shift
-
-  case ${subcmd} in
-    cblock)
-      HADOOP_CLASSNAME=org.apache.hadoop.cblock.cli.CBlockCli
-    ;;
-    cblockserver)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.cblock.CBlockManager
-    ;;
-    classpath)
-      hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
-    ;;
-    datanode)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      
HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
-      hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR
-      hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR
-    ;;
-    envvars)
-      echo "JAVA_HOME='${JAVA_HOME}'"
-      echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"
-      echo "HDFS_DIR='${HDFS_DIR}'"
-      echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'"
-      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
-      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
-      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
-      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
-      if [[ -n "${QATESTMODE}" ]]; then
-        echo "MYNAME=${MYNAME}"
-        echo "HADOOP_SHELL_EXECNAME=${HADOOP_SHELL_EXECNAME}"
-      fi
-      exit 0
-    ;;
-    freon)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.Freon
-    ;;
-    genesis)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
-    ;;
-    getozoneconf)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.OzoneGetConf
-    ;;
-    jscsi)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.cblock.jscsiHelper.SCSITargetDaemon
-    ;;
-    ksm)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager
-    ;;
-    oz)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell
-    ;;
-    noz)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SQLCLI
-    ;;
-    scm)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.StorageContainerManager'
-      hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto 
HADOOP_OPTS"
-      HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}"
-    ;;
-    scmcli)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
-    ;;
-    version)
-      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
-    ;;
-    *)
-      HADOOP_CLASSNAME="${subcmd}"
-      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
-        hadoop_exit_with_usage 1
-      fi
-    ;;
-  esac
-}
-
-# let's locate libexec...
-if [[ -n "${HADOOP_HOME}" ]]; then
-  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
-else
-  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
-  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
-fi
-
-HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
-  # shellcheck 
source=./hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
-  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
-else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
-  exit 1
-fi
-
-# now that we have support code, let's abs MYNAME so we can use it later
-MYNAME=$(hadoop_abs "${MYNAME}")
-
-if [[ $# = 0 ]]; then
-  hadoop_exit_with_usage 1
-fi
-
-HADOOP_SUBCMD=$1
-shift
-
-if hadoop_need_reexec ozone "${HADOOP_SUBCMD}"; then
-  hadoop_uservar_su ozone "${HADOOP_SUBCMD}" \
-    "${MYNAME}" \
-    "--reexec" \
-    "${HADOOP_USER_PARAMS[@]}"
-  exit $?
-fi
-
-hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
-
-HADOOP_SUBCMD_ARGS=("$@")
-
-if declare -f ozone_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
-  hadoop_debug "Calling dynamically: ozone_subcommand_${HADOOP_SUBCMD} 
${HADOOP_SUBCMD_ARGS[*]}"
-  "ozone_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
-else
-  ozonecmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
-fi
-
-hadoop_add_client_opts
-
-if [[ ${HADOOP_WORKER_MODE} = true ]]; then
-  hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/oz" 
"${HADOOP_USER_PARAMS[@]}"
-  exit $?
-fi
-
-hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
-
-# everything is in globals at this point, so call the generic handler
-hadoop_generic_java_subcmd_handler

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone 
b/hadoop-ozone/common/src/main/bin/ozone
new file mode 100755
index 0000000..2f2c98c
--- /dev/null
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -0,0 +1,202 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The name of the script being executed.
+HADOOP_SHELL_EXECNAME="ozone"
+MYNAME="${BASH_SOURCE-$0}"
+
+## @description  build up the hdfs command's usage text.
+## @audience     public
+## @stability    stable
+## @replaceable  no
+function hadoop_usage
+{
+  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
+  hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon"
+  hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker 
mode"
+  hadoop_add_option "--loglevel level" "set the log4j level for this command"
+  hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
+  hadoop_add_option "--workers" "turn on worker mode"
+
+
+  hadoop_add_subcommand "cblock" admin "cblock CLI"
+  hadoop_add_subcommand "cblockserver" daemon "run cblock server"
+  hadoop_add_subcommand "classpath" client "prints the class path needed to 
get the hadoop jar and the required libraries"
+  hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
+  hadoop_add_subcommand "envvars" client "display computed Hadoop environment 
variables"
+  hadoop_add_subcommand "freon" client "runs an ozone data generator"
+  hadoop_add_subcommand "genesis" client "runs a collection of ozone 
benchmarks to help with tuning."
+  hadoop_add_subcommand "getozoneconf" client "get ozone config values from
+  configuration"
+  hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode 
or DataNode."
+  hadoop_add_subcommand "jscsi" daemon "run cblock jscsi server"
+  hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
+  hadoop_add_subcommand "o3" client "command line interface for ozone"
+  hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata 
into relational data"
+  hadoop_add_subcommand "scm" daemon "run the Storage Container Manager 
service"
+  hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container 
Manager "
+  hadoop_add_subcommand "version" client "print the version"
+
+  hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
+}
+
+## @description  Default command handler for hadoop command
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        CLI arguments
+function ozonecmd_case
+{
+  subcmd=$1
+  shift
+
+  case ${subcmd} in
+    cblock)
+      HADOOP_CLASSNAME=org.apache.hadoop.cblock.cli.CBlockCli
+    ;;
+    cblockserver)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME=org.apache.hadoop.cblock.CBlockManager
+    ;;
+    classpath)
+      hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
+    ;;
+    datanode)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      
HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
+      hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR
+      hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR
+    ;;
+    envvars)
+      echo "JAVA_HOME='${JAVA_HOME}'"
+      echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"
+      echo "HDFS_DIR='${HDFS_DIR}'"
+      echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'"
+      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
+      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
+      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
+      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
+      if [[ -n "${QATESTMODE}" ]]; then
+        echo "MYNAME=${MYNAME}"
+        echo "HADOOP_SHELL_EXECNAME=${HADOOP_SHELL_EXECNAME}"
+      fi
+      exit 0
+    ;;
+    freon)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon
+    ;;
+    genesis)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
+    ;;
+    getozoneconf)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf;
+    ;;
+    jscsi)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME=org.apache.hadoop.cblock.jscsiHelper.SCSITargetDaemon
+    ;;
+    ksm)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager
+    ;;
+    oz)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell
+    ;;
+    noz)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SQLCLI
+    ;;
+    scm)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.StorageContainerManager'
+      hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto 
HADOOP_OPTS"
+      HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}"
+    ;;
+    scmcli)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
+    ;;
+    version)
+      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
+    ;;
+    *)
+      HADOOP_CLASSNAME="${subcmd}"
+      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+  esac
+}
+
+# let's locate libexec...
+if [[ -n "${HADOOP_HOME}" ]]; then
+  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
+else
+  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
+  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  # shellcheck 
source=./hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
+  exit 1
+fi
+
+# now that we have support code, let's abs MYNAME so we can use it later
+MYNAME=$(hadoop_abs "${MYNAME}")
+
+if [[ $# = 0 ]]; then
+  hadoop_exit_with_usage 1
+fi
+
+HADOOP_SUBCMD=$1
+shift
+
+if hadoop_need_reexec ozone "${HADOOP_SUBCMD}"; then
+  hadoop_uservar_su ozone "${HADOOP_SUBCMD}" \
+    "${MYNAME}" \
+    "--reexec" \
+    "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
+hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
+HADOOP_SUBCMD_ARGS=("$@")
+
+if declare -f ozone_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
+  hadoop_debug "Calling dynamically: ozone_subcommand_${HADOOP_SUBCMD} 
${HADOOP_SUBCMD_ARGS[*]}"
+  "ozone_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
+else
+  ozonecmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
+fi
+
+hadoop_add_client_opts
+
+if [[ ${HADOOP_WORKER_MODE} = true ]]; then
+  hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/ozone" 
"${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
+hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
+# everything is in globals at this point, so call the generic handler
+hadoop_generic_java_subcmd_handler

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/common/src/main/bin/start-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh 
b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index aabbab2..dda0a1c 100644
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -47,8 +47,8 @@ else
   exit 1
 fi
 
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
 
 if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} 
== "true" ]]; then
   echo "Ozone is not supported in a security enabled cluster."
@@ -57,7 +57,7 @@ fi
 
 #---------------------------------------------------------
 # Check if ozone is enabled
-OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey 
ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
+OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
 if [[ "${OZONE_ENABLED}" != "true" ]]; then
   echo "Operation is not supported because ozone is not enabled."
   exit -1
@@ -74,13 +74,13 @@ fi
 
 #---------------------------------------------------------
 # Ozone keyspacemanager nodes
-KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -keyspacemanagers 
2>/dev/null)
+KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 
2>/dev/null)
 echo "Starting key space manager nodes [${KSM_NODES}]"
 if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
   KSM_NODES=$(hostname)
 fi
 
-hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \
+hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${KSM_NODES}" \
@@ -91,9 +91,9 @@ HADOOP_JUMBO_RETCOUNTER=$?
 
 #---------------------------------------------------------
 # Ozone storagecontainermanager nodes
-SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf 
-storagecontainermanagers 2>/dev/null)
+SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf 
-storagecontainermanagers 2>/dev/null)
 echo "Starting storage container manager nodes [${SCM_NODES}]"
-hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/oz" \
+hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${SCM_NODES}" \

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/common/src/main/bin/stop-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh 
b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
index fb6ada1..be55be4 100644
--- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
@@ -47,8 +47,8 @@ else
   exit 1
 fi
 
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf 
-confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
 
 if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} 
== "true" ]]; then
   echo "Ozone is not supported in a security enabled cluster."
@@ -57,7 +57,7 @@ fi
 
 #---------------------------------------------------------
 # Check if ozone is enabled
-OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -confKey 
ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
+OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey 
ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)
 if [[ "${OZONE_ENABLED}" != "true" ]]; then
   echo "Operation is not supported because ozone is not enabled."
   exit -1
@@ -74,13 +74,13 @@ fi
 
 #---------------------------------------------------------
 # Ozone keyspacemanager nodes
-KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf -keyspacemanagers 
2>/dev/null)
+KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 
2>/dev/null)
 echo "Stopping key space manager nodes [${KSM_NODES}]"
 if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
   KSM_NODES=$(hostname)
 fi
 
-hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \
+hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${KSM_NODES}" \
@@ -89,9 +89,9 @@ hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/oz" \
 
 #---------------------------------------------------------
 # Ozone storagecontainermanager nodes
-SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/oz" getozoneconf 
-storagecontainermanagers 2>/dev/null)
+SCM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf 
-storagecontainermanagers 2>/dev/null)
 echo "Stopping storage container manager nodes [${SCM_NODES}]"
-hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/oz" \
+hadoop_uservar_su hdfs scm "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
   --hostnames "${SCM_NODES}" \

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
index e43cada..0fef77c 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.util.ToolRunner;
  */
 public class OzoneGetConf extends Configured implements Tool {
 
-  private static final String DESCRIPTION = "oz getconf is utility for "
+  private static final String DESCRIPTION = "ozone getconf is utility for "
       + "getting configuration information from the config file.\n";
 
   enum Command {
@@ -102,7 +102,7 @@ public class OzoneGetConf extends Configured implements 
Tool {
 
     /* Initialize USAGE based on Command values */
     StringBuilder usage = new StringBuilder(DESCRIPTION);
-    usage.append("\noz getconf \n");
+    usage.append("\nozone getconf \n");
     for (OzoneGetConf.Command cmd : OzoneGetConf.Command.values()) {
       usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription()
           + "\n");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh 
b/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh
index 0b7a5a7..c016165 100644
--- a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh
+++ b/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if [[ "${HADOOP_SHELL_EXECNAME}" = oz ]]; then
+if [[ "${HADOOP_SHELL_EXECNAME}" = ozone ]]; then
    hadoop_add_profile ozone
 fi
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index 39ef396..e9acb35 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -106,8 +106,8 @@ public final class KeySpaceManager extends 
ServiceRuntimeInfoImpl
       LoggerFactory.getLogger(KeySpaceManager.class);
 
   private static final String USAGE =
-      "Usage: \n oz ksm [genericOptions] " + "[ "
-          + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "oz ksm [ "
+      "Usage: \n ozone ksm [genericOptions] " + "[ "
+          + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone ksm [ 
"
           + StartupOption.HELP.getName() + " ]\n";
 
   /** Startup options. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
index 8553255..2aec0fc 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
@@ -193,25 +193,25 @@ public class Shell extends Configured implements Tool {
 
 
     Option createVolume = new Option(CREATE_VOLUME, true, "creates a volume" +
-        "for the specified user.\n \t For example : hdfs oz  -createVolume " +
+        "for the specified user.\n \t For example : hdfs o3  -createVolume " +
         "<volumeURI> -root -user <userName>\n");
     options.addOption(createVolume);
 
     Option deleteVolume = new Option(DELETE_VOLUME, true, "deletes a volume" +
-        "if it is empty.\n \t For example : hdfs oz -deleteVolume <volumeURI>" 
+
+        "if it is empty.\n \t For example : ozone oz -deleteVolume 
<volumeURI>" +
         " -root \n");
     options.addOption(deleteVolume);
 
     Option listVolume =
         new Option(LIST_VOLUME, true, "List the volumes of a given user.\n" +
-            "For example : hdfs oz -listVolume <ozoneURI>" +
-            "-user <username> -root or hdfs oz " +
+            "For example : ozone oz -listVolume <ozoneURI>" +
+            "-user <username> -root or ozone oz " +
             "-listVolume");
     options.addOption(listVolume);
 
     Option updateVolume =
         new Option(UPDATE_VOLUME, true, "updates an existing volume.\n" +
-            "\t For example : hdfs oz " +
+            "\t For example : ozone oz " +
             "-updateVolume <volumeURI> -quota " +
             "100TB\n");
     options.addOption(updateVolume);
@@ -230,7 +230,7 @@ public class Shell extends Configured implements Tool {
   private void addBucketCommands(Options opts) {
     Option createBucket = new Option(CREATE_BUCKET, true,
         "creates a bucket in a given volume." +
-        "For example: hdfs oz -createBucket <bucketURI>");
+        "For example: ozone oz -createBucket <bucketURI>");
     opts.addOption(createBucket);
 
     Option infoBucket =
@@ -247,7 +247,7 @@ public class Shell extends Configured implements Tool {
 
     Option updateBucket =
         new Option(UPDATE_BUCKET, true, "allows changing bucket attributes.\n" 
+
-            " For example: hdfs oz -updateBucket <bucketURI> " +
+            " For example: ozone oz -updateBucket <bucketURI> " +
             "-addAcl user:frodo:rw");
     opts.addOption(updateBucket);
 
@@ -396,7 +396,7 @@ public class Shell extends Configured implements Tool {
         return 0;
       } else {
         HelpFormatter helpFormatter = new HelpFormatter();
-        helpFormatter.printHelp(eightyColumn, "hdfs oz -command uri [args]",
+        helpFormatter.printHelp(eightyColumn, "ozone oz -command uri [args]",
             "Ozone Commands",
             opts, "Please correct your command and try again.");
         return 1;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8658ed7d/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md 
b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
index 9df974f..a274a22 100644
--- a/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
+++ b/hadoop-ozone/ozone-manager/src/main/site/markdown/OzoneCommandShell.md
@@ -21,14 +21,14 @@ with simple authentication.
 
 The Ozone commands take the following format.
 
-* `hdfs oz --command_ http://hostname:port/volume/bucket/key -user
+* `ozone oz --command_ http://hostname:port/volume/bucket/key -user
 <name> -root`
 
 The *port* specified in command should match the port mentioned in the config
 property `dfs.datanode.http.address`. This property can be set in 
`hdfs-site.xml`.
 The default value for the port is `9864` and is used in below commands.
 
-The *--root* option is a command line short cut that allows *hdfs oz*
+The *--root* option is a command line short cut that allows *ozone oz*
 commands to be run as the user that started the cluster. This is useful to
 indicate that you want the commands to be run as some admin user. The only
 reason for this option is that it makes the life of a lazy developer more
@@ -44,7 +44,7 @@ ozone cluster.
 
 Volumes can be created only by Admins. Here is an example of creating a volume.
 
-* `hdfs oz -createVolume http://localhost:9864/hive -user bilbo -quota
+* `ozone oz -createVolume http://localhost:9864/hive -user bilbo -quota
 100TB -root`
 
 The above command creates a volume called `hive` owned by user `bilbo`. The
@@ -55,26 +55,26 @@ admin in the cluster.
 
 Updates information like ownership and quota on an existing volume.
 
-* `hdfs oz  -updateVolume  http://localhost:9864/hive -quota 500TB -root`
+* `ozone oz  -updateVolume  http://localhost:9864/hive -quota 500TB -root`
 
 The above command changes the volume quota of hive from 100TB to 500TB.
 
 ### Delete Volume
 Deletes a Volume if it is empty.
 
-* `hdfs oz -deleteVolume http://localhost:9864/hive -root`
+* `ozone oz -deleteVolume http://localhost:9864/hive -root`
 
 
 ### Info Volume
 Info volume command allows the owner or the administrator of the cluster to 
read meta-data about a specific volume.
 
-* `hdfs oz -infoVolume http://localhost:9864/hive -root`
+* `ozone oz -infoVolume http://localhost:9864/hive -root`
 
 ### List Volumes
 
 List volume command can be used by administrator to list volumes of any user. 
It can also be used by a user to list volumes owned by him.
 
-* `hdfs oz -listVolume http://localhost:9864/ -user bilbo -root`
+* `ozone oz -listVolume http://localhost:9864/ -user bilbo -root`
 
 The above command lists all volumes owned by user bilbo.
 
@@ -89,7 +89,7 @@ Following examples assume that these commands are run by the 
owner of the volume
 
 Create bucket call allows the owner of a volume to create a bucket.
 
-* `hdfs oz -createBucket http://localhost:9864/hive/january`
+* `ozone oz -createBucket http://localhost:9864/hive/january`
 
 This call creates a bucket called `january` in the volume called `hive`. If
 the volume does not exist, then this call will fail.
@@ -98,23 +98,23 @@ the volume does not exist, then this call will fail.
 ### Update Bucket
 Updates bucket meta-data, like ACLs.
 
-* `hdfs oz -updateBucket http://localhost:9864/hive/january  -addAcl
+* `ozone oz -updateBucket http://localhost:9864/hive/january  -addAcl
 user:spark:rw`
 
 ### Delete Bucket
 Deletes a bucket if it is empty.
 
-* `hdfs oz -deleteBucket http://localhost:9864/hive/january`
+* `ozone oz -deleteBucket http://localhost:9864/hive/january`
 
 ### Info Bucket
 Returns information about a given bucket.
 
-* `hdfs oz -infoBucket http://localhost:9864/hive/january`
+* `ozone oz -infoBucket http://localhost:9864/hive/january`
 
 ### List Buckets
 List buckets on a given volume.
 
-* `hdfs oz -listBucket http://localhost:9864/hive`
+* `ozone oz -listBucket http://localhost:9864/hive`
 
 Ozone Key Commands
 ------------------
@@ -125,26 +125,26 @@ Ozone key commands allows users to put, delete and get 
keys from ozone buckets.
 Creates or overwrites a key in ozone store, -file points to the file you want
 to upload.
 
-* `hdfs oz -putKey  http://localhost:9864/hive/january/processed.orc  -file
+* `ozone oz -putKey  http://localhost:9864/hive/january/processed.orc  -file
 processed.orc`
 
 ### Get Key
 Downloads a file from the ozone bucket.
 
-* `hdfs oz -getKey  http://localhost:9864/hive/january/processed.orc  -file
+* `ozone oz -getKey  http://localhost:9864/hive/january/processed.orc  -file
   processed.orc.copy`
 
 ### Delete Key
 Deletes a key  from the ozone store.
 
-* `hdfs oz -deleteKey http://localhost:9864/hive/january/processed.orc`
+* `ozone oz -deleteKey http://localhost:9864/hive/january/processed.orc`
 
 ### Info Key
 Reads  key metadata from the ozone store.
 
-* `hdfs oz -infoKey http://localhost:9864/hive/january/processed.orc`
+* `ozone oz -infoKey http://localhost:9864/hive/january/processed.orc`
 
 ### List Keys
 List all keys in an ozone bucket.
 
-* `hdfs oz -listKey  http://localhost:9864/hive/january`
+* `ozone oz -listKey  http://localhost:9864/hive/january`


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org

Reply via email to