HDFS-13320. Ozone: Support for MicrobenchMarking Tool. Contributed by 
Shashikant Banerjee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d34288ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d34288ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d34288ae

Branch: refs/heads/HDFS-7240
Commit: d34288ae34142670f2b8569f63926b86ccc0ff2b
Parents: f344787
Author: Nanda kumar <na...@apache.org>
Authored: Wed Apr 4 19:44:49 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Wed Apr 4 19:45:05 2018 +0530

----------------------------------------------------------------------
 .../ContainerStates/ContainerStateMap.java      |    2 +
 hadoop-ozone/common/src/main/bin/oz             |  142 +--
 .../apache/hadoop/ozone/freon/OzoneGetConf.java |  269 ++++
 .../apache/hadoop/ozone/freon/package-info.java |   21 +
 .../apache/hadoop/ozone/tools/OzoneGetConf.java |  269 ----
 .../apache/hadoop/ozone/tools/package-info.java |   21 -
 .../hadoop/ozone/freon/TestDataValidate.java    |  146 +++
 .../apache/hadoop/ozone/freon/TestFreon.java    |  136 +++
 .../apache/hadoop/ozone/freon/package-info.java |   21 +
 .../BenchmarkContainerStateMap.java             |  180 ---
 .../ContainerStates/TestContainerStateMap.java  |  226 ----
 .../hadoop/ozone/tools/TestDataValidate.java    |  146 ---
 .../apache/hadoop/ozone/tools/TestFreon.java    |  136 ---
 .../apache/hadoop/ozone/tools/package-info.java |   21 -
 hadoop-ozone/tools/pom.xml                      |   18 +
 .../org/apache/hadoop/ozone/freon/Freon.java    | 1146 ++++++++++++++++++
 .../apache/hadoop/ozone/freon/package-info.java |   22 +
 .../genesis/BenchMarkContainerStateMap.java     |  190 +++
 .../genesis/BenchMarkDatanodeDispatcher.java    |  263 ++++
 .../genesis/BenchMarkMetadataStoreReads.java    |   50 +
 .../genesis/BenchMarkMetadataStoreWrites.java   |   43 +
 .../ozone/genesis/BenchMarkRocksDbStore.java    |  115 ++
 .../apache/hadoop/ozone/genesis/Genesis.java    |   50 +
 .../hadoop/ozone/genesis/GenesisUtil.java       |   75 ++
 .../hadoop/ozone/genesis/package-info.java      |   25 +
 .../org/apache/hadoop/ozone/tools/Freon.java    | 1146 ------------------
 .../apache/hadoop/ozone/tools/package-info.java |   22 -
 .../org/apache/hadoop/test/OzoneTestDriver.java |    2 +-
 hadoop-project/pom.xml                          |    2 -
 29 files changed, 2599 insertions(+), 2306 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerStateMap.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerStateMap.java
 
b/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerStateMap.java
index e31f696..fdbf267 100644
--- 
a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerStateMap.java
+++ 
b/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerStateMap.java
@@ -114,6 +114,8 @@ public class ContainerStateMap {
    */
   public void addContainer(ContainerInfo info)
       throws SCMException {
+    Preconditions.checkNotNull(info, "Container Info cannot be null");
+    Preconditions.checkNotNull(info.getPipeline(), "Pipeline cannot be null");
 
     try (AutoCloseableLock lock = autoLock.acquire()) {
       ContainerID id = ContainerID.valueof(info.getContainerID());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/common/src/main/bin/oz
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/oz 
b/hadoop-ozone/common/src/main/bin/oz
index c922eff..7e9c396 100755
--- a/hadoop-ozone/common/src/main/bin/oz
+++ b/hadoop-ozone/common/src/main/bin/oz
@@ -33,50 +33,23 @@ function hadoop_usage
   hadoop_add_option "--workers" "turn on worker mode"
 
 
-  hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
-  hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
   hadoop_add_subcommand "cblock" admin "cblock CLI"
   hadoop_add_subcommand "cblockserver" daemon "run cblock server"
   hadoop_add_subcommand "classpath" client "prints the class path needed to 
get the hadoop jar and the required libraries"
-  hadoop_add_subcommand "crypto" admin "configure HDFS encryption zones"
   hadoop_add_subcommand "datanode" daemon "run a DFS datanode"
-  hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug 
commands"
-  hadoop_add_subcommand "dfs" client "run a filesystem command on the file 
system"
-  hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client"
-  hadoop_add_subcommand "dfsrouter" daemon "run the DFS router"
-  hadoop_add_subcommand "dfsrouteradmin" admin "manage Router-based federation"
-  hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among 
disks on a given node"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment 
variables"
-  hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
-  hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the 
NameNode"
   hadoop_add_subcommand "freon" client "runs an ozone data generator"
-  hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
-  hadoop_add_subcommand "getconf" client "get config values from configuration"
+  hadoop_add_subcommand "genesis" client "runs a collection of ozone 
benchmarks to help with tuning."
   hadoop_add_subcommand "getozoneconf" client "get ozone config values from
   configuration"
-  hadoop_add_subcommand "groups" client "get the groups which users belong to"
-  hadoop_add_subcommand "haadmin" admin "run a DFS HA admin client"
   hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode 
or DataNode."
-  hadoop_add_subcommand "journalnode" daemon "run the DFS journalnode"
   hadoop_add_subcommand "jscsi" daemon "run cblock jscsi server"
   hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
-  hadoop_add_subcommand "lsSnapshottableDir" client "list all snapshottable 
dirs owned by the current user"
-  hadoop_add_subcommand "mover" daemon "run a utility to move block replicas 
across storage types"
-  hadoop_add_subcommand "namenode" daemon "run the DFS namenode"
-  hadoop_add_subcommand "nfs3" daemon "run an NFS version 3 gateway"
-  hadoop_add_subcommand "oev" admin "apply the offline edits viewer to an 
edits file"
-  hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an 
fsimage"
-  hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer 
to a legacy fsimage"
-  hadoop_add_subcommand "oz" client "command line interface for ozone"
-  hadoop_add_subcommand "oz_debug" client "ozone debug tool, convert ozone 
metadata into relational data"
-  hadoop_add_subcommand "portmap" daemon "run a portmap service"
+  hadoop_add_subcommand "o3" client "command line interface for ozone"
+  hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata 
into relational data"
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager 
service"
   hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container 
Manager "
-  hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
-  hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
-  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage 
policies"
   hadoop_add_subcommand "version" client "print the version"
-  hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
 
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
 }
@@ -92,13 +65,6 @@ function ozonecmd_case
   shift
 
   case ${subcmd} in
-    balancer)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.balancer.Balancer
-    ;;
-    cacheadmin)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin
-    ;;
     cblock)
       HADOOP_CLASSNAME=org.apache.hadoop.cblock.cli.CBlockCli
     ;;
@@ -109,9 +75,6 @@ function ozonecmd_case
     classpath)
       hadoop_do_classpath_subcommand HADOOP_CLASSNAME "$@"
     ;;
-    crypto)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CryptoAdmin
-    ;;
     datanode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       
HADOOP_SECURE_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
@@ -119,25 +82,6 @@ function ozonecmd_case
       hadoop_deprecate_envvar HADOOP_SECURE_DN_PID_DIR HADOOP_SECURE_PID_DIR
       hadoop_deprecate_envvar HADOOP_SECURE_DN_LOG_DIR HADOOP_SECURE_LOG_DIR
     ;;
-    debug)
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DebugAdmin'
-    ;;
-    dfs)
-      HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell
-    ;;
-    dfsadmin)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin
-    ;;
-    dfsrouter)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.DFSRouter'
-    ;;
-    dfsrouteradmin)
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
-    ;;
-    diskbalancer)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
-    ;;
     envvars)
       echo "JAVA_HOME='${JAVA_HOME}'"
       echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"
@@ -153,87 +97,29 @@ function ozonecmd_case
       fi
       exit 0
     ;;
-    ec)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.ECAdmin
-    ;;
-    fetchdt)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
-    ;;
     freon)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.Freon
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.Freon
     ;;
-    fsck)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
-    ;;
-    getconf)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.GetConf
+    genesis)
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis
     ;;
     getozoneconf)
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.tools.OzoneGetConf
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf
     ;;
-    groups)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetGroups
-    ;;
-    haadmin)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSHAAdmin
-    ;;
-    journalnode)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
-    ;;
     jscsi)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.cblock.jscsiHelper.SCSITargetDaemon
     ;;
-    jmxget)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.JMXGet
-    ;;
     ksm)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager
     ;;
-    lsSnapshottableDir)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
-    ;;
-    mover)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.mover.Mover
-    ;;
-    namenode)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.NameNode'
-      hadoop_add_param HADOOP_OPTS hdfs.audit.logger 
"-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
-    ;;
-    nfs3)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      # shellcheck disable=SC2034
-      
HADOOP_SECURE_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
-      hadoop_deprecate_envvar HADOOP_SECURE_NFS3_LOG_DIR HADOOP_SECURE_LOG_DIR
-      hadoop_deprecate_envvar HADOOP_SECURE_NFS3_PID_DIR HADOOP_SECURE_PID_DIR
-    ;;
-    oev)
-      
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
-    ;;
-    oiv)
-      
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
-    ;;
-    oiv_legacy)
-      
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
-    ;;
-    oz)
+    o3)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell
     ;;
-    oz_debug)
+    noz)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SQLCLI
     ;;
-    portmap)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
-    ;;
-    scmcli)
-       HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
-    ;;
     scm)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.ozone.scm.StorageContainerManager'
@@ -243,25 +129,9 @@ function ozonecmd_case
     scmcli)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.scm.cli.SCMCLI
     ;;
-    secondarynamenode)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
-      hadoop_add_param HADOOP_OPTS hdfs.audit.logger 
"-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
-    ;;
-    snapshotDiff)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
-    ;;
-    storagepolicies)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
-    ;;
     version)
       HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
     ;;
-    zkfc)
-      # shellcheck disable=SC2034
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
-    ;;
     *)
       HADOOP_CLASSNAME="${subcmd}"
       if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
new file mode 100644
index 0000000..4541472
--- /dev/null
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
@@ -0,0 +1,269 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.freon;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdsl.HdslUtils;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.KsmUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * CLI utility to print out ozone related configuration.
+ */
+public class OzoneGetConf extends Configured implements Tool {
+
+  private static final String DESCRIPTION = "oz getconf is utility for "
+      + "getting configuration information from the config file.\n";
+
+  enum Command {
+    INCLUDE_FILE("-includeFile",
+        "gets the include file path that defines the datanodes " +
+            "that can join the cluster."),
+    EXCLUDE_FILE("-excludeFile",
+        "gets the exclude file path that defines the datanodes " +
+            "that need to decommissioned."),
+    KEYSPACEMANAGER("-keyspacemanagers",
+        "gets list of ozone key space manager nodes in the cluster"),
+    STORAGECONTAINERMANAGER("-storagecontainermanagers",
+        "gets list of ozone storage container manager nodes in the cluster"),
+    CONFKEY("-confKey [key]", "gets a specific key from the configuration");
+
+    private static final Map<String, OzoneGetConf.CommandHandler> HANDLERS;
+
+    static {
+      HANDLERS = new HashMap<String, OzoneGetConf.CommandHandler>();
+      HANDLERS.put(StringUtils.toLowerCase(KEYSPACEMANAGER.getName()),
+          new KeySpaceManagersCommandHandler());
+      HANDLERS.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()),
+          new StorageContainerManagersCommandHandler());
+      HANDLERS.put(StringUtils.toLowerCase(CONFKEY.getName()),
+          new PrintConfKeyCommandHandler());
+    }
+
+    private final String cmd;
+    private final String description;
+
+    Command(String cmd, String description) {
+      this.cmd = cmd;
+      this.description = description;
+    }
+
+    public String getName() {
+      return cmd.split(" ")[0];
+    }
+
+    public String getUsage() {
+      return cmd;
+    }
+
+    public String getDescription() {
+      return description;
+    }
+
+    public static OzoneGetConf.CommandHandler getHandler(String cmd) {
+      return HANDLERS.get(StringUtils.toLowerCase(cmd));
+    }
+  }
+
+  static final String USAGE;
+  static {
+    HdfsConfiguration.init();
+
+    /* Initialize USAGE based on Command values */
+    StringBuilder usage = new StringBuilder(DESCRIPTION);
+    usage.append("\noz getconf \n");
+    for (OzoneGetConf.Command cmd : OzoneGetConf.Command.values()) {
+      usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription()
+          + "\n");
+    }
+    USAGE = usage.toString();
+  }
+
+  /**
+   * Handler to return value for key corresponding to the
+   * {@link OzoneGetConf.Command}.
+   */
+  static class CommandHandler {
+    String key; // Configuration key to lookup
+
+    CommandHandler() {
+      this(null);
+    }
+
+    CommandHandler(String key) {
+      this.key = key;
+    }
+
+    final int doWork(OzoneGetConf tool, String[] args) {
+      try {
+        checkArgs(args);
+
+        return doWorkInternal(tool, args);
+      } catch (Exception e) {
+        tool.printError(e.getMessage());
+      }
+      return -1;
+    }
+
+    protected void checkArgs(String args[]) {
+      if (args.length > 0) {
+        throw new HadoopIllegalArgumentException(
+            "Did not expect argument: " + args[0]);
+      }
+    }
+
+
+    /** Method to be overridden by sub classes for specific behavior */
+    int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception {
+
+      String value = tool.getConf().getTrimmed(key);
+      if (value != null) {
+        tool.printOut(value);
+        return 0;
+      }
+      tool.printError("Configuration " + key + " is missing.");
+      return -1;
+    }
+  }
+
+  static class PrintConfKeyCommandHandler extends OzoneGetConf.CommandHandler {
+    @Override
+    protected void checkArgs(String[] args) {
+      if (args.length != 1) {
+        throw new HadoopIllegalArgumentException(
+            "usage: " + OzoneGetConf.Command.CONFKEY.getUsage());
+      }
+    }
+
+    @Override
+    int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception {
+      this.key = args[0];
+      return super.doWorkInternal(tool, args);
+    }
+  }
+
+  private final PrintStream out; // Stream for printing command output
+  private final PrintStream err; // Stream for printing error
+
+  protected OzoneGetConf(Configuration conf) {
+    this(conf, System.out, System.err);
+  }
+
+  protected OzoneGetConf(Configuration conf, PrintStream out, PrintStream err) 
{
+    super(conf);
+    this.out = out;
+    this.err = err;
+  }
+
+  void printError(String message) {
+    err.println(message);
+  }
+
+  void printOut(String message) {
+    out.println(message);
+  }
+
+  private void printUsage() {
+    printError(USAGE);
+  }
+
+  /**
+   * Main method that runs the tool for given arguments.
+   * @param args arguments
+   * @return return status of the command
+   */
+  private int doWork(String[] args) {
+    if (args.length >= 1) {
+      OzoneGetConf.CommandHandler handler = 
OzoneGetConf.Command.getHandler(args[0]);
+      if (handler != null) {
+        return handler.doWork(this,
+            Arrays.copyOfRange(args, 1, args.length));
+      }
+    }
+    printUsage();
+    return -1;
+  }
+
+  @Override
+  public int run(final String[] args) throws Exception {
+    return SecurityUtil.doAsCurrentUser(
+          new PrivilegedExceptionAction<Integer>() {
+            @Override
+            public Integer run() throws Exception {
+              return doWork(args);
+            }
+          });
+  }
+
+  /**
+   * Handler for {@link Command#STORAGECONTAINERMANAGER}.
+   */
+  static class StorageContainerManagersCommandHandler extends CommandHandler {
+
+    @Override
+    public int doWorkInternal(OzoneGetConf tool, String[] args)
+        throws IOException {
+      Collection<InetSocketAddress> addresses = HdslUtils
+          .getSCMAddresses(tool.getConf());
+
+      for (InetSocketAddress addr : addresses) {
+        tool.printOut(addr.getHostName());
+      }
+      return 0;
+    }
+  }
+
+  /**
+   * Handler for {@link Command#KEYSPACEMANAGER}.
+   */
+  static class KeySpaceManagersCommandHandler extends CommandHandler {
+    @Override
+    public int doWorkInternal(OzoneGetConf tool, String[] args) throws 
IOException {
+      tool.printOut(KsmUtils.getKsmAddress(tool.getConf())
+          .getHostName());
+      return 0;
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
+    Configuration conf = new Configuration();
+    conf.addResource(new OzoneConfiguration());
+    int res = ToolRunner.run(new OzoneGetConf(conf), args);
+    System.exit(res);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
new file mode 100644
index 0000000..150c64e
--- /dev/null
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.freon;
+/**
+ * Classes related to Ozone tools.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/tools/OzoneGetConf.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/tools/OzoneGetConf.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/tools/OzoneGetConf.java
deleted file mode 100644
index dc81ab3..0000000
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/tools/OzoneGetConf.java
+++ /dev/null
@@ -1,269 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations 
under
- * the License.
- */
-
-package org.apache.hadoop.ozone.tools;
-
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdsl.HdslUtils;
-import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.KsmUtils;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * CLI utility to print out ozone related configuration.
- */
-public class OzoneGetConf extends Configured implements Tool {
-
-  private static final String DESCRIPTION = "oz getconf is utility for "
-      + "getting configuration information from the config file.\n";
-
-  enum Command {
-    INCLUDE_FILE("-includeFile",
-        "gets the include file path that defines the datanodes " +
-            "that can join the cluster."),
-    EXCLUDE_FILE("-excludeFile",
-        "gets the exclude file path that defines the datanodes " +
-            "that need to decommissioned."),
-    KEYSPACEMANAGER("-keyspacemanagers",
-        "gets list of ozone key space manager nodes in the cluster"),
-    STORAGECONTAINERMANAGER("-storagecontainermanagers",
-        "gets list of ozone storage container manager nodes in the cluster"),
-    CONFKEY("-confKey [key]", "gets a specific key from the configuration");
-
-    private static final Map<String, OzoneGetConf.CommandHandler> HANDLERS;
-
-    static {
-      HANDLERS = new HashMap<String, OzoneGetConf.CommandHandler>();
-      HANDLERS.put(StringUtils.toLowerCase(KEYSPACEMANAGER.getName()),
-          new KeySpaceManagersCommandHandler());
-      HANDLERS.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()),
-          new StorageContainerManagersCommandHandler());
-      HANDLERS.put(StringUtils.toLowerCase(CONFKEY.getName()),
-          new PrintConfKeyCommandHandler());
-    }
-
-    private final String cmd;
-    private final String description;
-
-    Command(String cmd, String description) {
-      this.cmd = cmd;
-      this.description = description;
-    }
-
-    public String getName() {
-      return cmd.split(" ")[0];
-    }
-
-    public String getUsage() {
-      return cmd;
-    }
-
-    public String getDescription() {
-      return description;
-    }
-
-    public static OzoneGetConf.CommandHandler getHandler(String cmd) {
-      return HANDLERS.get(StringUtils.toLowerCase(cmd));
-    }
-  }
-
-  static final String USAGE;
-  static {
-    HdfsConfiguration.init();
-
-    /* Initialize USAGE based on Command values */
-    StringBuilder usage = new StringBuilder(DESCRIPTION);
-    usage.append("\noz getconf \n");
-    for (OzoneGetConf.Command cmd : OzoneGetConf.Command.values()) {
-      usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription()
-          + "\n");
-    }
-    USAGE = usage.toString();
-  }
-
-  /**
-   * Handler to return value for key corresponding to the
-   * {@link OzoneGetConf.Command}.
-   */
-  static class CommandHandler {
-    String key; // Configuration key to lookup
-
-    CommandHandler() {
-      this(null);
-    }
-
-    CommandHandler(String key) {
-      this.key = key;
-    }
-
-    final int doWork(OzoneGetConf tool, String[] args) {
-      try {
-        checkArgs(args);
-
-        return doWorkInternal(tool, args);
-      } catch (Exception e) {
-        tool.printError(e.getMessage());
-      }
-      return -1;
-    }
-
-    protected void checkArgs(String args[]) {
-      if (args.length > 0) {
-        throw new HadoopIllegalArgumentException(
-            "Did not expect argument: " + args[0]);
-      }
-    }
-
-
-    /** Method to be overridden by sub classes for specific behavior */
-    int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception {
-
-      String value = tool.getConf().getTrimmed(key);
-      if (value != null) {
-        tool.printOut(value);
-        return 0;
-      }
-      tool.printError("Configuration " + key + " is missing.");
-      return -1;
-    }
-  }
-
-  static class PrintConfKeyCommandHandler extends OzoneGetConf.CommandHandler {
-    @Override
-    protected void checkArgs(String[] args) {
-      if (args.length != 1) {
-        throw new HadoopIllegalArgumentException(
-            "usage: " + OzoneGetConf.Command.CONFKEY.getUsage());
-      }
-    }
-
-    @Override
-    int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception {
-      this.key = args[0];
-      return super.doWorkInternal(tool, args);
-    }
-  }
-
-  private final PrintStream out; // Stream for printing command output
-  private final PrintStream err; // Stream for printing error
-
-  protected OzoneGetConf(Configuration conf) {
-    this(conf, System.out, System.err);
-  }
-
-  protected OzoneGetConf(Configuration conf, PrintStream out, PrintStream err) 
{
-    super(conf);
-    this.out = out;
-    this.err = err;
-  }
-
-  void printError(String message) {
-    err.println(message);
-  }
-
-  void printOut(String message) {
-    out.println(message);
-  }
-
-  private void printUsage() {
-    printError(USAGE);
-  }
-
-  /**
-   * Main method that runs the tool for given arguments.
-   * @param args arguments
-   * @return return status of the command
-   */
-  private int doWork(String[] args) {
-    if (args.length >= 1) {
-      OzoneGetConf.CommandHandler handler = 
OzoneGetConf.Command.getHandler(args[0]);
-      if (handler != null) {
-        return handler.doWork(this,
-            Arrays.copyOfRange(args, 1, args.length));
-      }
-    }
-    printUsage();
-    return -1;
-  }
-
-  @Override
-  public int run(final String[] args) throws Exception {
-    return SecurityUtil.doAsCurrentUser(
-          new PrivilegedExceptionAction<Integer>() {
-            @Override
-            public Integer run() throws Exception {
-              return doWork(args);
-            }
-          });
-  }
-
-  /**
-   * Handler for {@link Command#STORAGECONTAINERMANAGER}.
-   */
-  static class StorageContainerManagersCommandHandler extends CommandHandler {
-
-    @Override
-    public int doWorkInternal(OzoneGetConf tool, String[] args)
-        throws IOException {
-      Collection<InetSocketAddress> addresses = HdslUtils
-          .getSCMAddresses(tool.getConf());
-
-      for (InetSocketAddress addr : addresses) {
-        tool.printOut(addr.getHostName());
-      }
-      return 0;
-    }
-  }
-
-  /**
-   * Handler for {@link Command#KEYSPACEMANAGER}.
-   */
-  static class KeySpaceManagersCommandHandler extends CommandHandler {
-    @Override
-    public int doWorkInternal(OzoneGetConf tool, String[] args) throws 
IOException {
-      tool.printOut(KsmUtils.getKsmAddress(tool.getConf())
-          .getHostName());
-      return 0;
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
-    Configuration conf = new Configuration();
-    conf.addResource(new OzoneConfiguration());
-    int res = ToolRunner.run(new OzoneGetConf(conf), args);
-    System.exit(res);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/tools/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/tools/package-info.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/tools/package-info.java
deleted file mode 100644
index 02802b8..0000000
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/tools/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.tools;
-/**
- * Classes related to Ozone tools.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
new file mode 100644
index 0000000..860851b
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Tests Freon, with MiniOzoneCluster and validate data.
+ */
+public class TestDataValidate {
+
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = new MiniOzoneClassicCluster.Builder(conf)
+        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
+        .numDataNodes(5).build();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void ratisTestLargeKey() throws Exception {
+    List<String> args = new ArrayList<>();
+    args.add("-validateWrites");
+    args.add("-numOfVolumes");
+    args.add("1");
+    args.add("-numOfBuckets");
+    args.add("1");
+    args.add("-numOfKeys");
+    args.add("1");
+    args.add("-ratis");
+    args.add("3");
+    args.add("-keySize");
+    args.add("104857600");
+    Freon freon = new Freon(conf);
+    int res = ToolRunner.run(conf, freon,
+        args.toArray(new String[0]));
+    Assert.assertEquals(1, freon.getNumberOfVolumesCreated());
+    Assert.assertEquals(1, freon.getNumberOfBucketsCreated());
+    Assert.assertEquals(1, freon.getNumberOfKeysAdded());
+    Assert.assertEquals(0, freon.getUnsuccessfulValidationCount());
+    Assert.assertEquals(0, res);
+  }
+
+  @Test
+  public void standaloneTestLargeKey() throws Exception {
+    List<String> args = new ArrayList<>();
+    args.add("-validateWrites");
+    args.add("-numOfVolumes");
+    args.add("1");
+    args.add("-numOfBuckets");
+    args.add("1");
+    args.add("-numOfKeys");
+    args.add("1");
+    args.add("-keySize");
+    args.add("104857600");
+    Freon freon = new Freon(conf);
+    int res = ToolRunner.run(conf, freon,
+        args.toArray(new String[0]));
+    Assert.assertEquals(1, freon.getNumberOfVolumesCreated());
+    Assert.assertEquals(1, freon.getNumberOfBucketsCreated());
+    Assert.assertEquals(1, freon.getNumberOfKeysAdded());
+    Assert.assertEquals(0, freon.getUnsuccessfulValidationCount());
+    Assert.assertEquals(0, res);
+  }
+
+  @Test
+  public void validateWriteTest() throws Exception {
+    PrintStream originalStream = System.out;
+    ByteArrayOutputStream outStream = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(outStream));
+    List<String> args = new ArrayList<>();
+    args.add("-validateWrites");
+    args.add("-numOfVolumes");
+    args.add("2");
+    args.add("-numOfBuckets");
+    args.add("5");
+    args.add("-numOfKeys");
+    args.add("10");
+    Freon freon = new Freon(conf);
+    int res = ToolRunner.run(conf, freon,
+        args.toArray(new String[0]));
+    Assert.assertEquals(0, res);
+    Assert.assertEquals(2, freon.getNumberOfVolumesCreated());
+    Assert.assertEquals(10, freon.getNumberOfBucketsCreated());
+    Assert.assertEquals(100, freon.getNumberOfKeysAdded());
+    Assert.assertTrue(freon.getValidateWrites());
+    Assert.assertNotEquals(0, freon.getTotalKeysValidated());
+    Assert.assertNotEquals(0, freon.getSuccessfulValidationCount());
+    Assert.assertEquals(0, freon.getUnsuccessfulValidationCount());
+    System.setOut(originalStream);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java
new file mode 100644
index 0000000..07892b5
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Tests Freon, with MiniOzoneCluster.
+ */
+public class TestFreon {
+
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = new MiniOzoneClassicCluster.Builder(conf)
+        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
+        .numDataNodes(5).build();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void defaultTest() throws Exception {
+    List<String> args = new ArrayList<>();
+    args.add("-numOfVolumes");
+    args.add("2");
+    args.add("-numOfBuckets");
+    args.add("5");
+    args.add("-numOfKeys");
+    args.add("10");
+    Freon freon = new Freon(conf);
+    int res = ToolRunner.run(conf, freon,
+        args.toArray(new String[0]));
+    Assert.assertEquals(2, freon.getNumberOfVolumesCreated());
+    Assert.assertEquals(10, freon.getNumberOfBucketsCreated());
+    Assert.assertEquals(100, freon.getNumberOfKeysAdded());
+    Assert.assertEquals(10240 - 36, freon.getKeyValueLength());
+    Assert.assertEquals(0, res);
+  }
+
+  @Test
+  public void multiThread() throws Exception {
+    List<String> args = new ArrayList<>();
+    args.add("-numOfVolumes");
+    args.add("10");
+    args.add("-numOfBuckets");
+    args.add("1");
+    args.add("-numOfKeys");
+    args.add("10");
+    args.add("-numOfThread");
+    args.add("10");
+    args.add("-keySize");
+    args.add("10240");
+    Freon freon = new Freon(conf);
+    int res = ToolRunner.run(conf, freon,
+        args.toArray(new String[0]));
+    Assert.assertEquals(10, freon.getNumberOfVolumesCreated());
+    Assert.assertEquals(10, freon.getNumberOfBucketsCreated());
+    Assert.assertEquals(100, freon.getNumberOfKeysAdded());
+    Assert.assertEquals(0, res);
+  }
+
+  @Test
+  public void ratisTest3() throws Exception {
+    List<String> args = new ArrayList<>();
+    args.add("-numOfVolumes");
+    args.add("10");
+    args.add("-numOfBuckets");
+    args.add("1");
+    args.add("-numOfKeys");
+    args.add("10");
+    args.add("-ratis");
+    args.add("3");
+    args.add("-numOfThread");
+    args.add("10");
+    args.add("-keySize");
+    args.add("10240");
+    Freon freon = new Freon(conf);
+    int res = ToolRunner.run(conf, freon,
+        args.toArray(new String[0]));
+    Assert.assertEquals(10, freon.getNumberOfVolumesCreated());
+    Assert.assertEquals(10, freon.getNumberOfBucketsCreated());
+    Assert.assertEquals(100, freon.getNumberOfKeysAdded());
+    Assert.assertEquals(0, res);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
new file mode 100644
index 0000000..13d86ab
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.freon;
+/**
+ * Classes related to Ozone tools tests.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/BenchmarkContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/BenchmarkContainerStateMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/BenchmarkContainerStateMap.java
deleted file mode 100644
index 5f13537..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/BenchmarkContainerStateMap.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.scm.container.ContainerStates;
-
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
-import org.apache.hadoop.ozone.scm.exceptions.SCMException;
-import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.util.Time;
-import org.junit.Test;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.infra.Blackhole;
-import org.openjdk.jmh.runner.Runner;
-import org.openjdk.jmh.runner.RunnerException;
-import org.openjdk.jmh.runner.options.Options;
-import org.openjdk.jmh.runner.options.OptionsBuilder;
-import org.openjdk.jmh.runner.options.TimeValue;
-
-import java.io.IOException;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos
-    .LifeCycleState.OPEN;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos
-    .ReplicationFactor.ONE;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos
-    .ReplicationType.STAND_ALONE;
-
-public class BenchmarkContainerStateMap {
-  @Test
-  public void testRunBenchMarks() throws RunnerException {
-    Options opt = new OptionsBuilder()
-        .include(this.getClass().getName() + ".*")
-        .mode(Mode.Throughput)
-        .timeUnit(TimeUnit.SECONDS)
-        .warmupTime(TimeValue.seconds(1))
-        .warmupIterations(2)
-        .measurementTime(TimeValue.seconds(1))
-        .measurementIterations(2)
-        .threads(2)
-        .forks(1)
-        .shouldFailOnError(true)
-        .shouldDoGC(true)
-        .build();
-    new Runner(opt).run();
-  }
-
-  @Benchmark
-  public void createContainerBenchMark(BenchmarkState state, Blackhole bh)
-      throws IOException {
-    Pipeline pipeline = ContainerTestHelper
-        .createSingleNodePipeline(UUID.randomUUID().toString());
-    int cid = state.containerID.incrementAndGet();
-    ContainerInfo containerInfo = new ContainerInfo.Builder()
-        .setContainerName(pipeline.getContainerName())
-        .setState(HdslProtos.LifeCycleState.CLOSED)
-        .setPipeline(null)
-        // This is bytes allocated for blocks inside container, not the
-        // container size
-        .setAllocatedBytes(0)
-        .setUsedBytes(0)
-        .setNumberOfKeys(0)
-        .setStateEnterTime(Time.monotonicNow())
-        .setOwner("OZONE")
-        .setContainerID(cid)
-        .build();
-    state.stateMap.addContainer(containerInfo);
-  }
-
-  @Benchmark
-  public void getMatchingContainerBenchMark(BenchmarkState state,
-      Blackhole bh) {
-    state.stateMap.getMatchingContainerIDs(OPEN, "BILBO", ONE, STAND_ALONE);
-  }
-
-  @State(Scope.Thread)
-  public static class BenchmarkState {
-    public ContainerStateMap stateMap;
-    public AtomicInteger containerID;
-
-    @Setup(Level.Trial)
-    public void initialize() throws IOException {
-      stateMap = new ContainerStateMap();
-      Pipeline pipeline = ContainerTestHelper
-          .createSingleNodePipeline(UUID.randomUUID().toString());
-
-
-      int currentCount = 1;
-      for (int x = 1; x < 1000 * 1000; x++) {
-        try {
-          ContainerInfo containerInfo = new ContainerInfo.Builder()
-              .setContainerName(pipeline.getContainerName())
-              .setState(HdslProtos.LifeCycleState.CLOSED)
-              .setPipeline(null)
-              // This is bytes allocated for blocks inside container, not the
-              // container size
-              .setAllocatedBytes(0)
-              .setUsedBytes(0)
-              .setNumberOfKeys(0)
-              .setStateEnterTime(Time.monotonicNow())
-              .setOwner("OZONE")
-              .setContainerID(x)
-              .build();
-          stateMap.addContainer(containerInfo);
-          currentCount++;
-        } catch (SCMException e) {
-          e.printStackTrace();
-        }
-      }
-      for (int y = currentCount; y < 2000; y++) {
-        try {
-          ContainerInfo containerInfo = new ContainerInfo.Builder()
-              .setContainerName(pipeline.getContainerName())
-              .setState(HdslProtos.LifeCycleState.OPEN)
-              .setPipeline(null)
-              // This is bytes allocated for blocks inside container, not the
-              // container size
-              .setAllocatedBytes(0)
-              .setUsedBytes(0)
-              .setNumberOfKeys(0)
-              .setStateEnterTime(Time.monotonicNow())
-              .setOwner("OZONE")
-              .setContainerID(y)
-              .build();
-          stateMap.addContainer(containerInfo);
-          currentCount++;
-        } catch (SCMException e) {
-          e.printStackTrace();
-        }
-
-      }
-      try {
-
-        ContainerInfo containerInfo = new ContainerInfo.Builder()
-            .setContainerName(pipeline.getContainerName())
-            .setState(HdslProtos.LifeCycleState.OPEN)
-            .setPipeline(null)
-            // This is bytes allocated for blocks inside container, not the
-            // container size
-            .setAllocatedBytes(0)
-            .setUsedBytes(0)
-            .setNumberOfKeys(0)
-            .setStateEnterTime(Time.monotonicNow())
-            .setOwner("OZONE")
-            .setContainerID(currentCount++)
-            .build();
-        stateMap.addContainer(containerInfo);
-      } catch (SCMException e) {
-        e.printStackTrace();
-      }
-
-      containerID = new AtomicInteger(currentCount++);
-
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/TestContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/TestContainerStateMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/TestContainerStateMap.java
deleted file mode 100644
index 516f3a3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/TestContainerStateMap.java
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- *
- */
-
-package org.apache.hadoop.ozone.scm.container.ContainerStates;
-
-import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.hdsl.protocol.proto.HdslProtos;
-import org.apache.hadoop.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.SortedSet;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.CLOSED;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.OPEN;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor.ONE;
-import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType.STAND_ALONE;
-
-public class TestContainerStateMap {
-  @Test
-  public void testLifeCyleStates() throws IOException {
-    ContainerStateMap stateMap = new ContainerStateMap();
-    int currentCount = 1;
-    Pipeline pipeline = ContainerTestHelper
-        .createSingleNodePipeline(UUID.randomUUID().toString());
-    for (int x = 1; x < 1001; x++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(x)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-
-    SortedSet<ContainerID> openSet = stateMap.getMatchingContainerIDs(OPEN,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(1000, openSet.size());
-
-    int nextMax = currentCount + 1000;
-    for (int y = currentCount; y < nextMax; y++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.CLOSED)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(y)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-
-    openSet = stateMap.getMatchingContainerIDs(OPEN, "OZONE",
-        ONE, STAND_ALONE);
-    SortedSet<ContainerID> closeSet = stateMap.getMatchingContainerIDs(CLOSED,
-        "OZONE", ONE, STAND_ALONE);
-
-    // Assert that open is still 1000 and we added 1000 more closed containers.
-    Assert.assertEquals(1000, openSet.size());
-    Assert.assertEquals(1000, closeSet.size());
-
-    SortedSet<ContainerID> ownerSet = stateMap.getContainerIDsByOwner("OZONE");
-
-    // Ozone owns 1000 open and 1000 closed containers.
-    Assert.assertEquals(2000, ownerSet.size());
-  }
-
-  @Test
-  public void testGetMatchingContainers() throws IOException {
-    ContainerStateMap stateMap = new ContainerStateMap();
-    Pipeline pipeline = ContainerTestHelper
-        .createSingleNodePipeline(UUID.randomUUID().toString());
-
-    int currentCount = 1;
-    for (int x = 1; x < 1001; x++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(x)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-    SortedSet<ContainerID> openSet = stateMap.getMatchingContainerIDs(OPEN,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(1000, openSet.size());
-    int nextMax = currentCount + 200;
-    for (int y = currentCount; y < nextMax; y++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.CLOSED)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(y)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-
-    nextMax = currentCount + 30000;
-    for (int z = currentCount; z < nextMax; z++) {
-      ContainerInfo containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(z)
-          .build();
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-    // At this point, if we get all Open Containers that belong to Ozone,
-    // with one replica and standalone replica strategy -- we should get
-    // 1000 + 30000.
-
-    openSet = stateMap.getMatchingContainerIDs(OPEN,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(1000 + 30000, openSet.size());
-
-
-    // There is no such owner, so should be a set of zero size.
-    SortedSet<ContainerID> zeroSet = stateMap.getMatchingContainerIDs(OPEN,
-        "BILBO", ONE, STAND_ALONE);
-    Assert.assertEquals(0, zeroSet.size());
-    int nextId = currentCount++;
-    ContainerInfo containerInfo = new ContainerInfo.Builder()
-        .setContainerName(pipeline.getContainerName())
-        .setState(HdslProtos.LifeCycleState.OPEN)
-        .setPipeline(pipeline)
-        .setAllocatedBytes(0)
-        .setUsedBytes(0)
-        .setNumberOfKeys(0)
-        .setStateEnterTime(Time.monotonicNow())
-        .setOwner("BILBO")
-        .setContainerID(nextId)
-        .build();
-
-    stateMap.addContainer(containerInfo);
-    zeroSet = stateMap.getMatchingContainerIDs(OPEN,
-        "BILBO", ONE, STAND_ALONE);
-    Assert.assertEquals(1, zeroSet.size());
-
-    // Assert that the container we got back is the nextID itself.
-    Assert.assertTrue(zeroSet.contains(new ContainerID(nextId)));
-  }
-
-  @Test
-  public void testUpdateState() throws IOException {
-    ContainerStateMap stateMap = new ContainerStateMap();
-    Pipeline pipeline = ContainerTestHelper
-        .createSingleNodePipeline(UUID.randomUUID().toString());
-
-    ContainerInfo containerInfo = null;
-    int currentCount = 1;
-    for (int x = 1; x < 1001; x++) {
-      containerInfo = new ContainerInfo.Builder()
-          .setContainerName(pipeline.getContainerName())
-          .setState(HdslProtos.LifeCycleState.OPEN)
-          .setPipeline(pipeline)
-          .setAllocatedBytes(0)
-          .setUsedBytes(0)
-          .setNumberOfKeys(0)
-          .setStateEnterTime(Time.monotonicNow())
-          .setOwner("OZONE")
-          .setContainerID(x)
-          .build();
-
-
-      stateMap.addContainer(containerInfo);
-      currentCount++;
-    }
-
-    stateMap.updateState(containerInfo, OPEN, CLOSED);
-    SortedSet<ContainerID> closedSet = stateMap.getMatchingContainerIDs(CLOSED,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(1, closedSet.size());
-    Assert.assertTrue(closedSet.contains(containerInfo.containerID()));
-
-    SortedSet<ContainerID> openSet = stateMap.getMatchingContainerIDs(OPEN,
-        "OZONE", ONE, STAND_ALONE);
-    Assert.assertEquals(999, openSet.size());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/TestDataValidate.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/TestDataValidate.java
deleted file mode 100644
index 8e5861a..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/TestDataValidate.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.tools;
-
-import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Tests Freon, with MiniOzoneCluster and validate data.
- */
-public class TestDataValidate {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
-        .numDataNodes(5).build();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void ratisTestLargeKey() throws Exception {
-    List<String> args = new ArrayList<>();
-    args.add("-validateWrites");
-    args.add("-numOfVolumes");
-    args.add("1");
-    args.add("-numOfBuckets");
-    args.add("1");
-    args.add("-numOfKeys");
-    args.add("1");
-    args.add("-ratis");
-    args.add("3");
-    args.add("-keySize");
-    args.add("104857600");
-    Freon freon = new Freon(conf);
-    int res = ToolRunner.run(conf, freon,
-        args.toArray(new String[0]));
-    Assert.assertEquals(1, freon.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, freon.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, freon.getNumberOfKeysAdded());
-    Assert.assertEquals(0, freon.getUnsuccessfulValidationCount());
-    Assert.assertEquals(0, res);
-  }
-
-  @Test
-  public void standaloneTestLargeKey() throws Exception {
-    List<String> args = new ArrayList<>();
-    args.add("-validateWrites");
-    args.add("-numOfVolumes");
-    args.add("1");
-    args.add("-numOfBuckets");
-    args.add("1");
-    args.add("-numOfKeys");
-    args.add("1");
-    args.add("-keySize");
-    args.add("104857600");
-    Freon freon = new Freon(conf);
-    int res = ToolRunner.run(conf, freon,
-        args.toArray(new String[0]));
-    Assert.assertEquals(1, freon.getNumberOfVolumesCreated());
-    Assert.assertEquals(1, freon.getNumberOfBucketsCreated());
-    Assert.assertEquals(1, freon.getNumberOfKeysAdded());
-    Assert.assertEquals(0, freon.getUnsuccessfulValidationCount());
-    Assert.assertEquals(0, res);
-  }
-
-  @Test
-  public void validateWriteTest() throws Exception {
-    PrintStream originalStream = System.out;
-    ByteArrayOutputStream outStream = new ByteArrayOutputStream();
-    System.setOut(new PrintStream(outStream));
-    List<String> args = new ArrayList<>();
-    args.add("-validateWrites");
-    args.add("-numOfVolumes");
-    args.add("2");
-    args.add("-numOfBuckets");
-    args.add("5");
-    args.add("-numOfKeys");
-    args.add("10");
-    Freon freon = new Freon(conf);
-    int res = ToolRunner.run(conf, freon,
-        args.toArray(new String[0]));
-    Assert.assertEquals(0, res);
-    Assert.assertEquals(2, freon.getNumberOfVolumesCreated());
-    Assert.assertEquals(10, freon.getNumberOfBucketsCreated());
-    Assert.assertEquals(100, freon.getNumberOfKeysAdded());
-    Assert.assertTrue(freon.getValidateWrites());
-    Assert.assertNotEquals(0, freon.getTotalKeysValidated());
-    Assert.assertNotEquals(0, freon.getSuccessfulValidationCount());
-    Assert.assertEquals(0, freon.getUnsuccessfulValidationCount());
-    System.setOut(originalStream);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/TestFreon.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/TestFreon.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/TestFreon.java
deleted file mode 100644
index 2960fc0..0000000
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/TestFreon.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.tools;
-
-import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdsl.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Tests Freon, with MiniOzoneCluster.
- */
-public class TestFreon {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
-        .numDataNodes(5).build();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void defaultTest() throws Exception {
-    List<String> args = new ArrayList<>();
-    args.add("-numOfVolumes");
-    args.add("2");
-    args.add("-numOfBuckets");
-    args.add("5");
-    args.add("-numOfKeys");
-    args.add("10");
-    Freon freon = new Freon(conf);
-    int res = ToolRunner.run(conf, freon,
-        args.toArray(new String[0]));
-    Assert.assertEquals(2, freon.getNumberOfVolumesCreated());
-    Assert.assertEquals(10, freon.getNumberOfBucketsCreated());
-    Assert.assertEquals(100, freon.getNumberOfKeysAdded());
-    Assert.assertEquals(10240 - 36, freon.getKeyValueLength());
-    Assert.assertEquals(0, res);
-  }
-
-  @Test
-  public void multiThread() throws Exception {
-    List<String> args = new ArrayList<>();
-    args.add("-numOfVolumes");
-    args.add("10");
-    args.add("-numOfBuckets");
-    args.add("1");
-    args.add("-numOfKeys");
-    args.add("10");
-    args.add("-numOfThread");
-    args.add("10");
-    args.add("-keySize");
-    args.add("10240");
-    Freon freon = new Freon(conf);
-    int res = ToolRunner.run(conf, freon,
-        args.toArray(new String[0]));
-    Assert.assertEquals(10, freon.getNumberOfVolumesCreated());
-    Assert.assertEquals(10, freon.getNumberOfBucketsCreated());
-    Assert.assertEquals(100, freon.getNumberOfKeysAdded());
-    Assert.assertEquals(0, res);
-  }
-
-  @Test
-  public void ratisTest3() throws Exception {
-    List<String> args = new ArrayList<>();
-    args.add("-numOfVolumes");
-    args.add("10");
-    args.add("-numOfBuckets");
-    args.add("1");
-    args.add("-numOfKeys");
-    args.add("10");
-    args.add("-ratis");
-    args.add("3");
-    args.add("-numOfThread");
-    args.add("10");
-    args.add("-keySize");
-    args.add("10240");
-    Freon freon = new Freon(conf);
-    int res = ToolRunner.run(conf, freon,
-        args.toArray(new String[0]));
-    Assert.assertEquals(10, freon.getNumberOfVolumesCreated());
-    Assert.assertEquals(10, freon.getNumberOfBucketsCreated());
-    Assert.assertEquals(100, freon.getNumberOfKeysAdded());
-    Assert.assertEquals(0, res);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/package-info.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/package-info.java
deleted file mode 100644
index ea56345..0000000
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/tools/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.tools;
-/**
- * Classes related to Ozone tools tests.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d34288ae/hadoop-ozone/tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index dcd2798..591960d 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -49,5 +49,23 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd";>
       <artifactId>metrics-core</artifactId>
       <version>3.2.4</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-server-scm</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.openjdk.jmh</groupId>
+      <artifactId>jmh-core</artifactId>
+      <version>1.19</version>
+    </dependency>
+    <dependency>
+      <groupId>org.openjdk.jmh</groupId>
+      <artifactId>jmh-generator-annprocess</artifactId>
+      <version>1.19</version>
+    </dependency>
   </dependencies>
 </project>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to