http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 63f3376..0e349d3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -18,24 +18,6 @@
 
 package org.apache.hadoop.fs;
 
-import com.google.common.base.Preconditions;
-import org.apache.commons.collections.map.CaseInsensitiveMap;
-import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
-import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.BufferedReader;
@@ -71,8 +53,26 @@ import java.util.zip.CheckedOutputStream;
 import java.util.zip.GZIPInputStream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipFile;
-import java.util.zip.ZipOutputStream;
 import java.util.zip.ZipInputStream;
+import java.util.zip.ZipOutputStream;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.collections.map.CaseInsensitiveMap;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A collection of file-processing util methods.
@@ -91,9 +91,9 @@ public class FileUtil {
   public static final int SYMLINK_NO_PRIVILEGE = 2;
 
   /**
-   * Buffer size used while zipping and unzipping zip-ed archives.
+   * Buffer size for copying the content of a compressed file to a new file.
    */
-  private static final int BUFFER_SIZE = 8192;
+  private static final int BUFFER_SIZE = 8_192;
 
   /**
    * convert an array of FileStatus to an array of Path
@@ -613,7 +613,6 @@ public class FileUtil {
   }
 
   /**
-<<<<<<< HEAD
   * creates a zip archive of the source dir and writes a zip file.
    *
    * @param sourceDir - The directory to zip.
@@ -673,7 +672,7 @@ public class FileUtil {
   }
 
   /**
-   * Given a File input it will unzip the file in a the unzip directory
+   * Given a stream input, it will unzip it into the unzip directory
    * passed as the second parameter
    * @param inputStream The zip file as input
    * @param toDir The unzip directory where to unzip the zip file.
@@ -731,12 +730,12 @@ public class FileUtil {
             if (!file.getParentFile().mkdirs()) {
               if (!file.getParentFile().isDirectory()) {
                 throw new IOException("Mkdirs failed to create " +
-                    file.getParentFile().toString());
+                                      file.getParentFile().toString());
               }
             }
             OutputStream out = new FileOutputStream(file);
             try {
-              byte[] buffer = new byte[BUFFER_SIZE];
+              byte[] buffer = new byte[8192];
               int i;
               while ((i = in.read(buffer)) != -1) {
                 out.write(buffer, 0, i);
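
For reference, the copy loop in this hunk is the standard fixed-buffer stream
copy. A minimal standalone sketch of the same pattern (the class and method
names here are illustrative, not part of FileUtil):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    final class StreamCopySketch {
      // Same size as FileUtil's BUFFER_SIZE constant above.
      private static final int BUFFER_SIZE = 8_192;

      // Read up to BUFFER_SIZE bytes at a time and write them out until
      // the input stream is exhausted.
      static void copy(InputStream in, OutputStream out) throws IOException {
        byte[] buffer = new byte[BUFFER_SIZE];
        int read;
        while ((read = in.read(buffer)) != -1) {
          out.write(buffer, 0, read);
        }
      }
    }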

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 7e76099..3c31a8c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -814,7 +814,7 @@ public class RPC {
 
     static String serverNameFromClass(Class<?> clazz) {
       //The basic idea here is to handle names like
-      //org.apache.hadoop.ozone.protocol.proto.
+      //org.apache.hadoop.hdsl.protocol.proto.
      //
      // StorageDatanodeProtocolProtos$StorageContainerDatanodeProtocolService$2
       //where the getSimpleName is also empty
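
The comment above refers to generated protobuf service classes (anonymous
subclasses such as ...ProtocolService$2) whose getSimpleName() is empty. The
sketch below shows one way such a fallback can be written; it is a hedged
approximation of the idea, not the exact Hadoop implementation:

    final class ServerNameSketch {
      // Derive a readable name from a class whose getSimpleName() is empty,
      // e.g. a.b.Protos$SomeService$2 -> "SomeService".
      static String serverNameFromClass(Class<?> clazz) {
        String name = clazz.getName();
        name = name.substring(name.lastIndexOf('.') + 1); // strip the package
        String[] parts = name.split("\\$");
        // Walk backwards past the numeric suffixes of anonymous classes.
        for (int i = parts.length - 1; i >= 0; i--) {
          if (!parts[i].isEmpty() && !parts[i].matches("\\d+")) {
            return parts[i];
          }
        }
        return name;
      }
    }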

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 7b885c9..4f68ace 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -68,6 +68,44 @@
       <artifactId>hadoop-client-integration-tests</artifactId>
       <scope>provided</scope>
     </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-ozone-manager</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-server-scm</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-tools</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-cblock-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-container-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-objectstore-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdsl-tools</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-tools</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-cblock-tools</artifactId>
+    </dependency>
+
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-dist/src/main/compose/cblock/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/cblock/docker-compose.yaml b/hadoop-dist/src/main/compose/cblock/docker-compose.yaml
index fefb37f..b88514e 100644
--- a/hadoop-dist/src/main/compose/cblock/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/cblock/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       hostname: namenode
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,36 +29,38 @@ services:
          - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/hdfs","datanode"]
+      command: ["/opt/hadoop/bin/oz","datanode"]
+      env_file:
+         - ./docker-config
    jscsi:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       ports:
         - 3260:3260
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/hdfs","jscsi"]
+      command: ["/opt/hadoop/bin/oz","jscsi"]
    cblock:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/hdfs","cblockserver"]
+      command: ["/opt/hadoop/bin/oz","cblockserver"]
    scm:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
          - 9876:9876
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/hdfs","scm"]
+      command: ["/opt/hadoop/bin/oz","scm"]
       environment:
           ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-dist/src/main/compose/cblock/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/cblock/docker-config b/hadoop-dist/src/main/compose/cblock/docker-config
index 6851bc6..95917fd 100644
--- a/hadoop-dist/src/main/compose/cblock/docker-config
+++ b/hadoop-dist/src/main/compose/cblock/docker-config
@@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
 OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
 OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
-
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HdslServerPlugin,org.apache.hadoop.ozone.web.ObjectStoreRestPlugin
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index 02713c7..f2b263c 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -17,7 +17,7 @@
 version: "3"
 services:
    namenode:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       hostname: namenode
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
@@ -29,14 +29,16 @@ services:
          - ./docker-config
       command: ["/opt/hadoop/bin/hdfs","namenode"]
    datanode:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
         - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
         - 9864
-      command: ["/opt/hadoop/bin/hdfs","datanode"]
+      command: ["/opt/hadoop/bin/oz","datanode"]
+      env_file:
+        - ./docker-config
    ksm:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
@@ -45,9 +47,9 @@ services:
          ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/hdfs","ksm"]
+      command: ["/opt/hadoop/bin/oz","ksm"]
    scm:
-      image: elek/hadoop-runner:latest
+      image: elek/hadoop-runner:o3-refactor
       volumes:
          - ../..//hadoop-${VERSION}:/opt/hadoop
       ports:
@@ -56,4 +58,4 @@ services:
           - ./docker-config
       environment:
           ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
-      command: ["/opt/hadoop/bin/hdfs","scm"]
+      command: ["/opt/hadoop/bin/oz","scm"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-dist/src/main/compose/ozone/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index c3f3663..20c1e30 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -27,6 +27,7 @@ HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HdslServerPlugin,org.apache.hadoop.ozone.web.ObjectStoreRestPlugin
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 5503cb2..8e2bc94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -63,13 +63,6 @@
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
 
-  <Match>
-    <Package name="org.apache.hadoop.ozone.protocol.proto" />
-  </Match>
-  <Match>
-    <Package name="org.apache.hadoop.hdfs.ozone.protocol.proto" />
-  </Match>
-
   <!-- BlockLocations are user-facing, but LocatedBlocks are not. -->
   <Match>
     <Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
@@ -98,4 +91,5 @@
     <Method name="getSymlinkInBytes" />
     <Bug pattern="EI_EXPOSE_REP" />
   </Match>
+
 </FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 19d8113..a5ed7a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -64,6 +64,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
     </dependency>
     <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>org.mock-server</groupId>
       <artifactId>mockserver-netty</artifactId>
       <scope>test</scope>
@@ -100,37 +105,14 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
       <type>test-jar</type>
     </dependency>
-    <dependency>
-      <groupId>com.fasterxml.jackson.core</groupId>
-      <artifactId>jackson-annotations</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-all</artifactId>
-    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-annotations</artifactId>
+    </dependency>
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
     </dependency>
-
-    <dependency>
-      <artifactId>ratis-server</artifactId>
-      <groupId>org.apache.ratis</groupId>
-      <exclusions>
-        <exclusion>
-          <groupId>org.slf4j</groupId>
-          <artifactId>slf4j-log4j12</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <artifactId>ratis-netty</artifactId>
-      <groupId>org.apache.ratis</groupId>
-    </dependency>
-    <dependency>
-      <artifactId>ratis-grpc</artifactId>
-      <groupId>org.apache.ratis</groupId>
-    </dependency>
   </dependencies>
 
   <build>
@@ -177,11 +159,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>inotify.proto</include>
                   <include>erasurecoding.proto</include>
                   <include>ReconfigurationProtocol.proto</include>
-                  <include>StorageContainerLocationProtocol.proto</include>
-                  <include>DatanodeContainerProtocol.proto</include>
-                  <include>Ozone.proto</include>
-                  <include>KeySpaceManagerProtocol.proto</include>
-                  <include>ScmBlockLocationProtocol.proto</include>
                 </includes>
               </source>
             </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
deleted file mode 100644
index fde728c..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/CBlockConfigKeys.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.cblock;
-
-import static java.lang.Thread.NORM_PRIORITY;
-
-/**
- * This class contains constants for configuration keys used in CBlock.
- */
-public final class CBlockConfigKeys {
-  public static final String DFS_CBLOCK_SERVICERPC_ADDRESS_KEY =
-      "dfs.cblock.servicerpc-address";
-  public static final int DFS_CBLOCK_SERVICERPC_PORT_DEFAULT =
-      9810;
-  public static final String DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT =
-      "0.0.0.0";
-
-  public static final String DFS_CBLOCK_JSCSIRPC_ADDRESS_KEY =
-      "dfs.cblock.jscsi-address";
-
-  //The port on CBlockManager node for jSCSI to ask
-  public static final String DFS_CBLOCK_JSCSI_PORT_KEY =
-      "dfs.cblock.jscsi.port";
-  public static final int DFS_CBLOCK_JSCSI_PORT_DEFAULT =
-      9811;
-
-  public static final String DFS_CBLOCK_SERVICERPC_BIND_HOST_KEY =
-      "dfs.cblock.service.rpc-bind-host";
-  public static final String DFS_CBLOCK_JSCSIRPC_BIND_HOST_KEY =
-      "dfs.cblock.jscsi.rpc-bind-host";
-
-  // default block size is 4KB
-  public static final int DFS_CBLOCK_SERVICE_BLOCK_SIZE_DEFAULT =
-      4096;
-
-  public static final String DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_KEY =
-      "dfs.cblock.service.handler.count";
-  public static final int DFS_CBLOCK_SERVICERPC_HANDLER_COUNT_DEFAULT = 10;
-
-  public static final String DFS_CBLOCK_SERVICE_LEVELDB_PATH_KEY =
-      "dfs.cblock.service.leveldb.path";
-  //TODO : find a better place
-  public static final String DFS_CBLOCK_SERVICE_LEVELDB_PATH_DEFAULT =
-      "/tmp/cblock_levelDB.dat";
-
-
-  public static final String DFS_CBLOCK_DISK_CACHE_PATH_KEY =
-      "dfs.cblock.disk.cache.path";
-  public static final String DFS_CBLOCK_DISK_CACHE_PATH_DEFAULT =
-      "/tmp/cblockCacheDB";
-  /**
-   * Setting this flag to true makes the block layer compute a sha256 hash of
-   * the data and log that information along with block ID. This is very
-   * useful for doing trace based simulation of various workloads. Since it is
-   * computing a hash for each block this could be expensive, hence default
-   * is false.
-   */
-  public static final String DFS_CBLOCK_TRACE_IO = "dfs.cblock.trace.io";
-  public static final boolean DFS_CBLOCK_TRACE_IO_DEFAULT = false;
-
-  public static final String DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO =
-      "dfs.cblock.short.circuit.io";
-  public static final boolean DFS_CBLOCK_ENABLE_SHORT_CIRCUIT_IO_DEFAULT =
-      false;
-
-  /**
-   * Cache size in 1000s of entries. 256 indicates 256 * 1024.
-   */
-  public static final String DFS_CBLOCK_CACHE_QUEUE_SIZE_KB =
-      "dfs.cblock.cache.queue.size.in.kb";
-  public static final int DFS_CBLOCK_CACHE_QUEUE_SIZE_KB_DEFAULT = 256;
-
-  /**
-   *  Minimum Number of threads that cache pool will use for background I/O.
-   */
-  public static final String DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE =
-      "dfs.cblock.cache.core.min.pool.size";
-  public static final int DFS_CBLOCK_CACHE_CORE_MIN_POOL_SIZE_DEFAULT = 16;
-
-  /**
-   *  Maximum Number of threads that cache pool will use for background I/O.
-   */
-
-  public static final String DFS_CBLOCK_CACHE_MAX_POOL_SIZE =
-      "dfs.cblock.cache.max.pool.size";
-  public static final int DFS_CBLOCK_CACHE_MAX_POOL_SIZE_DEFAULT = 256;
-
-  /**
-   * Number of seconds to keep the Thread alive when it is idle.
-   */
-  public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE =
-      "dfs.cblock.cache.keep.alive";
-  public static final String DFS_CBLOCK_CACHE_KEEP_ALIVE_DEFAULT = "60s";
-
-  /**
-   * Priority of cache flusher thread, affecting the relative performance of
-   * write and read.
-   */
-  public static final String DFS_CBLOCK_CACHE_THREAD_PRIORITY =
-      "dfs.cblock.cache.thread.priority";
-  public static final int DFS_CBLOCK_CACHE_THREAD_PRIORITY_DEFAULT =
-      NORM_PRIORITY;
-
-  /**
-   * Block Buffer size in terms of blockID entries, 512 means 512 blockIDs.
-   */
-  public static final String DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE =
-      "dfs.cblock.cache.block.buffer.size";
-  public static final int DFS_CBLOCK_CACHE_BLOCK_BUFFER_SIZE_DEFAULT = 512;
-
-  public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL =
-      "dfs.cblock.block.buffer.flush.interval";
-  public static final String DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT =
-      "60s";
-
-  // jscsi server settings
-  public static final String DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY =
-      "dfs.cblock.jscsi.server.address";
-  public static final String DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT =
-      "0.0.0.0";
-  public static final String DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_KEY =
-      "dfs.cblock.jscsi.cblock.server.address";
-  public static final String DFS_CBLOCK_JSCSI_CBLOCK_SERVER_ADDRESS_DEFAULT =
-      "127.0.0.1";
-
-  // to what address cblock server should talk to scm?
-  public static final String DFS_CBLOCK_SCM_IPADDRESS_KEY =
-      "dfs.cblock.scm.ipaddress";
-  public static final String DFS_CBLOCK_SCM_IPADDRESS_DEFAULT =
-      "127.0.0.1";
-  public static final String DFS_CBLOCK_SCM_PORT_KEY =
-      "dfs.cblock.scm.port";
-  public static final int DFS_CBLOCK_SCM_PORT_DEFAULT = 9860;
-
-  public static final String DFS_CBLOCK_CONTAINER_SIZE_GB_KEY =
-      "dfs.cblock.container.size.gb";
-  public static final int DFS_CBLOCK_CONTAINER_SIZE_GB_DEFAULT =
-      5;
-
-  // LevelDB cache file uses an off-heap cache in LevelDB of 256 MB.
-  public static final String DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_KEY =
-      "dfs.cblock.cache.leveldb.cache.size.mb";
-  public static final int DFS_CBLOCK_CACHE_LEVELDB_CACHE_SIZE_MB_DEFAULT = 256;
-
-  /**
-   * Cache does an best case attempt to write a block to a container.
-   * At some point of time, we will need to handle the case where we did try
-   * 64K times and is till not able to write to the container.
-   *
-   * TODO: We will need cBlock Server to allow us to do a remapping of the
-   * block location in case of failures, at that point we should reduce the
-   * retry count to a more normal number. This is approximately 18 hours of
-   * retry.
-   */
-  public static final String DFS_CBLOCK_CACHE_MAX_RETRY_KEY =
-      "dfs.cblock.cache.max.retry";
-  public static final int DFS_CBLOCK_CACHE_MAX_RETRY_DEFAULT =
-      64 * 1024;
-
-  /**
-   * Cblock CLI configs.
-   */
-  public static final String DFS_CBLOCK_MANAGER_POOL_SIZE =
-      "dfs.cblock.manager.pool.size";
-  public static final int DFS_CBLOCK_MANAGER_POOL_SIZE_DEFAULT = 16;
-
-  /**
-   * currently the largest supported volume is about 8TB, which might take
-   * > 20 seconds to finish creating containers. thus set timeout to 30 sec.
-   */
-  public static final String DFS_CBLOCK_RPC_TIMEOUT =
-      "dfs.cblock.rpc.timeout";
-  public static final String DFS_CBLOCK_RPC_TIMEOUT_DEFAULT = "300s";
-
-  public static final String DFS_CBLOCK_ISCSI_ADVERTISED_IP =
-      "dfs.cblock.iscsi.advertised.ip";
-
-  public static final String DFS_CBLOCK_ISCSI_ADVERTISED_PORT =
-      "dfs.cblock.iscsi.advertised.port";
-
-  public static final int DFS_CBLOCK_ISCSI_ADVERTISED_PORT_DEFAULT = 3260;
-
-
-  public static final String
-      DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED
-        = "dfs.cblock.kubernetes.dynamic-provisioner.enabled";
-
-  public static final boolean
-      DFS_CBLOCK_KUBERNETES_DYNAMIC_PROVISIONER_ENABLED_DEFAULT = false;
-
-  public static final String
-      DFS_CBLOCK_KUBERNETES_CBLOCK_USER =
-         "dfs.cblock.kubernetes.cblock-user";
-
-  public static final String
-      DFS_CBLOCK_KUBERNETES_CBLOCK_USER_DEFAULT =
-         "iqn.2001-04.org.apache.hadoop";
-
-  public static final String
-      DFS_CBLOCK_KUBERNETES_CONFIG_FILE_KEY =
-         "dfs.cblock.kubernetes.configfile";
-
-  private CBlockConfigKeys() {
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/package-info.java
deleted file mode 100644
index 5f5d3cd..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/cblock/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.cblock;
-
-/**
- This package contains ozone client side libraries.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
index 517e474..96f22ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ *     http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -54,6 +54,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
   private String xferAddr;
   private int containerPort; // container Stand_alone Rpc port.
   private int ratisPort; // Container Ratis RPC Port.
+  private int ozoneRestPort;
 
   /**
    * UUID identifying a given datanode. For upgraded Datanodes this is the
@@ -75,6 +76,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
         from.getInfoPort(),
         from.getInfoSecurePort(),
         from.getIpcPort());
+    this.ozoneRestPort = from.getOzoneRestPort();
     this.peerHostName = from.getPeerHostName();
   }
 
@@ -267,6 +269,8 @@ public class DatanodeID implements Comparable<DatanodeID> {
     infoPort = nodeReg.getInfoPort();
     infoSecurePort = nodeReg.getInfoSecurePort();
     ipcPort = nodeReg.getIpcPort();
+    ratisPort = nodeReg.getRatisPort();
+    ozoneRestPort = nodeReg.getOzoneRestPort();
   }
 
   /**
@@ -313,6 +317,24 @@ public class DatanodeID implements Comparable<DatanodeID> {
   }
 
   /**
+   * Ozone rest port.
+   *
+   * @return rest port.
+   */
+  public int getOzoneRestPort() {
+    return ozoneRestPort;
+  }
+
+  /**
+   * Set the ozone rest port.
+   *
+   * @param ozoneRestPort - the ozone rest port to set
+   */
+  public void setOzoneRestPort(int ozoneRestPort) {
+    this.ozoneRestPort = ozoneRestPort;
+  }
+
+  /**
    * Returns a DataNode ID from the protocol buffers.
    *
    * @param datanodeIDProto - protoBuf Message
@@ -326,6 +348,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
         datanodeIDProto.getInfoSecurePort(), datanodeIDProto.getIpcPort());
     id.setContainerPort(datanodeIDProto.getContainerPort());
     id.setRatisPort(datanodeIDProto.getRatisPort());
+    id.setOzoneRestPort(datanodeIDProto.getOzoneRestPort());
     return id;
   }
 
@@ -345,6 +368,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
         .setIpcPort(this.getIpcPort())
         .setContainerPort(this.getContainerPort())
         .setRatisPort(this.getRatisPort())
+        .setOzoneRestPort(this.getOzoneRestPort())
         .build();
   }
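
A short usage sketch of the accessor pair added above. The seven-argument
constructor matches the one PBHelperClient uses in this same commit; the
port numbers are dummy values for illustration:

    import org.apache.hadoop.hdfs.protocol.DatanodeID;

    public class OzoneRestPortSketch {
      public static void main(String[] args) {
        DatanodeID id = new DatanodeID("127.0.0.1", "localhost",
            "datanode-uuid", 9866, 9864, 9865, 9867);
        id.setOzoneRestPort(9880);                 // new mutator
        System.out.println(id.getOzoneRestPort()); // prints 9880
      }
    }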
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index d9e7aa0..e94b094 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -292,7 +292,9 @@ public class PBHelperClient {
             dn.getDatanodeUuid() : "")
         .setInfoPort(dn.getInfoPort())
         .setInfoSecurePort(dn.getInfoSecurePort())
-        .setIpcPort(dn.getIpcPort()).build();
+        .setIpcPort(dn.getIpcPort())
+        .setOzoneRestPort(dn.getOzoneRestPort())
+        .build();
   }
 
   public static DatanodeInfoProto.AdminState convert(
@@ -742,9 +744,13 @@ public class PBHelperClient {
 
   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
-    return new DatanodeID(dn.getIpAddr(), dn.getHostName(),
-        dn.getDatanodeUuid(), dn.getXferPort(), dn.getInfoPort(),
-        dn.hasInfoSecurePort() ? dn.getInfoSecurePort() : 0, dn.getIpcPort());
+    DatanodeID datanodeID =
+        new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(),
+            dn.getXferPort(), dn.getInfoPort(),
+            dn.hasInfoSecurePort() ? dn.getInfoSecurePort() : 0,
+            dn.getIpcPort());
+    datanodeID.setOzoneRestPort(dn.getOzoneRestPort());
+    return datanodeID;
   }
 
   public static AdminStates convert(AdminState adminState) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
deleted file mode 100644
index ff0ac4e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-package org.apache.hadoop.ozone;
-
-import java.util.Objects;
-
-/**
- * OzoneACL classes define bucket ACLs used in OZONE.
- *
- * ACLs in Ozone follow this pattern.
- * • user:name:rw
- * • group:name:rw
- * • world::rw
- */
-public class OzoneAcl {
-  private OzoneACLType type;
-  private String name;
-  private OzoneACLRights rights;
-
-  /**
-   * Constructor for OzoneAcl.
-   */
-  public OzoneAcl() {
-  }
-
-  /**
-   * Constructor for OzoneAcl.
-   *
-   * @param type - Type
-   * @param name - Name of user
-   * @param rights - Rights
-   */
-  public OzoneAcl(OzoneACLType type, String name, OzoneACLRights rights) {
-    this.name = name;
-    this.rights = rights;
-    this.type = type;
-    if (type == OzoneACLType.WORLD && name.length() != 0) {
-      throw new IllegalArgumentException("Unexpected name part in world type");
-    }
-    if (((type == OzoneACLType.USER) || (type == OzoneACLType.GROUP))
-        && (name.length() == 0)) {
-      throw new IllegalArgumentException("User or group name is required");
-    }
-  }
-
-  /**
-   * Parses an ACL string and returns the ACL object.
-   *
-   * @param acl - Acl String , Ex. user:anu:rw
-   *
-   * @return - Ozone ACLs
-   */
-  public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException {
-    if ((acl == null) || acl.isEmpty()) {
-      throw new IllegalArgumentException("ACLs cannot be null or empty");
-    }
-    String[] parts = acl.trim().split(":");
-    if (parts.length < 3) {
-      throw new IllegalArgumentException("ACLs are not in expected format");
-    }
-
-    OzoneACLType aclType = OzoneACLType.valueOf(parts[0].toUpperCase());
-    OzoneACLRights rights = OzoneACLRights.getACLRight(parts[2].toLowerCase());
-
-    // TODO : Support sanitation of these user names by calling into
-    // userAuth Interface.
-    return new OzoneAcl(aclType, parts[1], rights);
-  }
-
-  @Override
-  public String toString() {
-    return type + ":" + name + ":" + OzoneACLRights.getACLRightsString(rights);
-  }
-
-  /**
-   * Returns a hash code value for the object. This method is
-   * supported for the benefit of hash tables.
-   *
-   * @return a hash code value for this object.
-   *
-   * @see Object#equals(Object)
-   * @see System#identityHashCode
-   */
-  @Override
-  public int hashCode() {
-    return Objects.hash(this.getName(), this.getRights().toString(),
-                        this.getType().toString());
-  }
-
-  /**
-   * Returns name.
-   *
-   * @return name
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns Rights.
-   *
-   * @return - Rights
-   */
-  public OzoneACLRights getRights() {
-    return rights;
-  }
-
-  /**
-   * Returns Type.
-   *
-   * @return type
-   */
-  public OzoneACLType getType() {
-    return type;
-  }
-
-  /**
-   * Indicates whether some other object is "equal to" this one.
-   *
-   * @param obj the reference object with which to compare.
-   *
-   * @return {@code true} if this object is the same as the obj
-   * argument; {@code false} otherwise.
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    OzoneAcl otherAcl = (OzoneAcl) obj;
-    return otherAcl.getName().equals(this.getName()) &&
-        otherAcl.getRights() == this.getRights() &&
-        otherAcl.getType() == this.getType();
-  }
-
-  /**
-   * ACL types.
-   */
-  public enum OzoneACLType {
-    USER(OzoneConsts.OZONE_ACL_USER_TYPE),
-    GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE),
-    WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE);
-
-    /**
-     * String value for this Enum.
-     */
-    private final String value;
-
-    /**
-     * Init OzoneACLtypes enum.
-     *
-     * @param val String type for this enum.
-     */
-    OzoneACLType(String val) {
-      value = val;
-    }
-  }
-
-  /**
-   * ACL rights.
-   */
-  public enum OzoneACLRights {
-    READ, WRITE, READ_WRITE;
-
-    /**
-     * Returns the ACL rights based on passed in String.
-     *
-     * @param type ACL right string
-     *
-     * @return OzoneACLRights
-     */
-    public static OzoneACLRights getACLRight(String type) {
-      if (type == null || type.isEmpty()) {
-        throw new IllegalArgumentException("ACL right cannot be empty");
-      }
-
-      switch (type) {
-      case OzoneConsts.OZONE_ACL_READ:
-        return OzoneACLRights.READ;
-      case OzoneConsts.OZONE_ACL_WRITE:
-        return OzoneACLRights.WRITE;
-      case OzoneConsts.OZONE_ACL_READ_WRITE:
-      case OzoneConsts.OZONE_ACL_WRITE_READ:
-        return OzoneACLRights.READ_WRITE;
-      default:
-        throw new IllegalArgumentException("ACL right is not recognized");
-      }
-
-    }
-
-    /**
-     * Returns String representation of ACL rights.
-     * @param acl OzoneACLRights
-     * @return String representation of acl
-     */
-    public static String getACLRightsString(OzoneACLRights acl) {
-      switch(acl) {
-      case READ:
-        return OzoneConsts.OZONE_ACL_READ;
-      case WRITE:
-        return OzoneConsts.OZONE_ACL_WRITE;
-      case READ_WRITE:
-        return OzoneConsts.OZONE_ACL_READ_WRITE;
-      default:
-        throw new IllegalArgumentException("ACL right is not recognized");
-      }
-    }
-
-  }
-
-}
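
The deleted class above fully specifies the ACL string grammar
(type:name:rights). A minimal usage sketch against that API, using the
pre-refactor package path shown in this diff:

    import org.apache.hadoop.ozone.OzoneAcl;

    public class AclParseSketch {
      public static void main(String[] args) {
        // "user:anu:rw" -> type USER, name "anu", rights READ_WRITE
        OzoneAcl acl = OzoneAcl.parseAcl("user:anu:rw");
        System.out.println(acl); // prints USER:anu:rw
      }
    }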

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
deleted file mode 100644
index b3b35f1..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ozone.client.ReplicationFactor;
-import org.apache.hadoop.ozone.client.ReplicationType;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.RestClient;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.scm.ScmConfigKeys;
-
-/**
- * This class contains constants for configuration keys used in Ozone.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class OzoneConfigKeys {
-  public static final String DFS_CONTAINER_IPC_PORT =
-      "dfs.container.ipc";
-  public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
-
-  /**
-   *
-   * When set to true, allocate a random free port for ozone container,
-   * so that a mini cluster is able to launch multiple containers on a node.
-   *
-   * When set to false (default), container port is fixed as specified by
-   * DFS_CONTAINER_IPC_PORT_DEFAULT.
-   */
-  public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
-      "dfs.container.ipc.random.port";
-  public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
-      false;
-
-  /**
-   * Ratis Port where containers listen to.
-   */
-  public static final String DFS_CONTAINER_RATIS_IPC_PORT =
-      "dfs.container.ratis.ipc";
-  public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
-
-  /**
-   * When set to true, allocate a random free port for ozone container, so that
-   * a mini cluster is able to launch multiple containers on a node.
-   */
-  public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
-      "dfs.container.ratis.ipc.random.port";
-  public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
-      false;
-
-  public static final String OZONE_LOCALSTORAGE_ROOT =
-      "ozone.localstorage.root";
-  public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
-  public static final String OZONE_ENABLED =
-      "ozone.enabled";
-  public static final boolean OZONE_ENABLED_DEFAULT = false;
-  public static final String OZONE_HANDLER_TYPE_KEY =
-      "ozone.handler.type";
-  public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
-  public static final String OZONE_TRACE_ENABLED_KEY =
-      "ozone.trace.enabled";
-  public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
-
-  public static final String OZONE_METADATA_DIRS =
-      "ozone.metadata.dirs";
-
-  public static final String OZONE_METADATA_STORE_IMPL =
-      "ozone.metastore.impl";
-  public static final String OZONE_METADATA_STORE_IMPL_LEVELDB =
-      "LevelDB";
-  public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB =
-      "RocksDB";
-  public static final String OZONE_METADATA_STORE_IMPL_DEFAULT =
-      OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
-      "ozone.metastore.rocksdb.statistics";
-
-  public static final String  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT =
-      "ALL";
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF =
-      "OFF";
-
-  public static final String OZONE_CONTAINER_CACHE_SIZE =
-      "ozone.container.cache.size";
-  public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024;
-
-  public static final String OZONE_SCM_BLOCK_SIZE_IN_MB =
-      "ozone.scm.block.size.in.mb";
-  public static final long OZONE_SCM_BLOCK_SIZE_DEFAULT = 256;
-
-  /**
-   * Ozone administrator users delimited by comma.
-   * If not set, only the user who launches an ozone service will be the
-   * admin user. This property must be set if ozone services are started by
-   * different users. Otherwise the RPC layer will reject calls from
-   * other servers which are started by users not in the list.
-   * */
-  public static final String OZONE_ADMINISTRATORS =
-      "ozone.administrators";
-
-  public static final String OZONE_CLIENT_PROTOCOL =
-      "ozone.client.protocol";
-  public static final Class<? extends ClientProtocol>
-      OZONE_CLIENT_PROTOCOL_RPC = RpcClient.class;
-  public static final Class<? extends ClientProtocol>
-      OZONE_CLIENT_PROTOCOL_REST = RestClient.class;
-
-  // This defines the overall connection limit for the connection pool used in
-  // RestClient.
-  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX =
-      "ozone.rest.client.http.connection.max";
-  public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100;
-
-  // This defines the connection limit per one HTTP route/host.
-  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX =
-      "ozone.rest.client.http.connection.per-route.max";
-
-  public static final int
-      OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
-
-  public static final String OZONE_CLIENT_SOCKET_TIMEOUT =
-      "ozone.client.socket.timeout";
-  public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000;
-  public static final String OZONE_CLIENT_CONNECTION_TIMEOUT =
-      "ozone.client.connection.timeout";
-  public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000;
-
-  public static final String OZONE_REPLICATION = "ozone.replication";
-  public static final int OZONE_REPLICATION_DEFAULT =
-      ReplicationFactor.THREE.getValue();
-
-  public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type";
-  public static final String OZONE_REPLICATION_TYPE_DEFAULT =
-      ReplicationType.RATIS.toString();
-
-  /**
-   * Configuration property to configure the cache size of client list calls.
-   */
-  public static final String OZONE_CLIENT_LIST_CACHE_SIZE =
-      "ozone.client.list.cache";
-  public static final int OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT = 1000;
-
-  /**
-   * Configuration properties for Ozone Block Deleting Service.
-   */
-  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL =
-      "ozone.block.deleting.service.interval";
-  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT
-      = "60s";
-
-  /**
-   * The interval of open key clean service.
-   */
-  public static final String OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS =
-      "ozone.open.key.cleanup.service.interval.seconds";
-  public static final int
-      OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT
-      = 24 * 3600; // a total of 24 hour
-
-  /**
-   * An open key gets cleaned up when it is being in open state for too long.
-   */
-  public static final String OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS =
-      "ozone.open.key.expire.threshold";
-  public static final int OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT =
-      24 * 3600;
-
-  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT =
-      "ozone.block.deleting.service.timeout";
-  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
-      = "300s"; // 300s for default
-
-  public static final String OZONE_KEY_PREALLOCATION_MAXSIZE =
-      "ozone.key.preallocation.maxsize";
-  public static final long OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT
-      = 128 * OzoneConsts.MB;
-
-  public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER =
-      "ozone.block.deleting.limit.per.task";
-  public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT
-      = 1000;
-
-  public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL
-      = "ozone.block.deleting.container.limit.per.interval";
-  public static final int
-      OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
-
-  public static final String OZONE_CONTAINER_REPORT_INTERVAL =
-      "ozone.container.report.interval";
-  public static final String OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT =
-      "60s";
-
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY;
-  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
-  public static final int DFS_CONTAINER_CHUNK_MAX_SIZE
-      = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
-  public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
-      "dfs.container.ratis.datanode.storage.dir";
-
-  public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
-      "ozone.web.authentication.kerberos.principal";
-
-  /**
-   * There is no need to instantiate this class.
-   */
-  private OzoneConfigKeys() {
-  }
-}
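
Keys like these are read through the ordinary Hadoop Configuration API. A
small sketch, with key names and defaults taken verbatim from the deleted
class above:

    import org.apache.hadoop.conf.Configuration;

    public class ContainerPortSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        int ipcPort = conf.getInt("dfs.container.ipc", 9859);
        boolean randomPort =
            conf.getBoolean("dfs.container.ipc.random.port", false);
        System.out.println("ipc=" + ipcPort + " random=" + randomPort);
      }
    }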

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
deleted file mode 100644
index 3cc4697..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * Set of constants used in Ozone implementation.
- */
-@InterfaceAudience.Private
-public final class OzoneConsts {
-  public static final String OZONE_SIMPLE_ROOT_USER = "root";
-  public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
-
-  /*
-   * BucketName length is used for both buckets and volume lengths
-   */
-  public static final int OZONE_MIN_BUCKET_NAME_LENGTH = 3;
-  public static final int OZONE_MAX_BUCKET_NAME_LENGTH = 63;
-
-  public static final String OZONE_ACL_USER_TYPE = "user";
-  public static final String OZONE_ACL_GROUP_TYPE = "group";
-  public static final String OZONE_ACL_WORLD_TYPE = "world";
-
-  public static final String OZONE_ACL_READ = "r";
-  public static final String OZONE_ACL_WRITE = "w";
-  public static final String OZONE_ACL_READ_WRITE = "rw";
-  public static final String OZONE_ACL_WRITE_READ = "wr";
-
-  public static final String OZONE_DATE_FORMAT =
-      "EEE, dd MMM yyyy HH:mm:ss zzz";
-  public static final String OZONE_TIME_ZONE = "GMT";
-
-  public static final String OZONE_COMPONENT = "component";
-  public static final String OZONE_FUNCTION  = "function";
-  public static final String OZONE_RESOURCE = "resource";
-  public static final String OZONE_USER = "user";
-  public static final String OZONE_REQUEST = "request";
-
-  public static final String CONTAINER_EXTENSION = ".container";
-  public static final String CONTAINER_META = ".meta";
-
-  //  container storage is in the following format.
-  //  Data Volume basePath/containers/<containerName>/metadata and
-  //  Data Volume basePath/containers/<containerName>/data/...
-  public static final String CONTAINER_PREFIX  = "containers";
-  public static final String CONTAINER_META_PATH = "metadata";
-  public static final String CONTAINER_DATA_PATH = "data";
-  public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
-  public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
-  public static final String CONTAINER_ROOT_PREFIX = "repository";
-
-  public static final String FILE_HASH = "SHA-256";
-  public final static String CHUNK_OVERWRITE = "OverWriteRequested";
-
-  public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB
-  public static final long KB = 1024L;
-  public static final long MB = KB * 1024L;
-  public static final long GB = MB * 1024L;
-  public static final long TB = GB * 1024L;
-
-  /**
-   * level DB names used by SCM and data nodes.
-   */
-  public static final String CONTAINER_DB_SUFFIX = "container.db";
-  public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
-  public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String BLOCK_DB = "block.db";
-  public static final String NODEPOOL_DB = "nodepool.db";
-  public static final String OPEN_CONTAINERS_DB = "openContainers.db";
-  public static final String DELETED_BLOCK_DB = "deletedBlock.db";
-  public static final String KSM_DB_NAME = "ksm.db";
-
-  /**
-   * Supports Bucket Versioning.
-   */
-  public enum Versioning {NOT_DEFINED, ENABLED, DISABLED}
-
-  /**
-   * Ozone handler types.
-   */
-  public static final String OZONE_HANDLER_DISTRIBUTED = "distributed";
-  public static final String OZONE_HANDLER_LOCAL = "local";
-
-  public static final String DELETING_KEY_PREFIX = "#deleting#";
-  public static final String OPEN_KEY_PREFIX = "#open#";
-  public static final String OPEN_KEY_ID_DELIMINATOR = "#";
-
-  /**
-   * KSM LevelDB prefixes.
-   *
-   * KSM DB stores metadata as KV pairs with certain prefixes,
-   * prefix is used to improve the performance to get related
-   * metadata.
-   *
-   * KSM DB Schema:
-   *  ----------------------------------------------------------
-   *  |  KEY                                     |     VALUE   |
-   *  ----------------------------------------------------------
-   *  | $userName                                |  VolumeList |
-   *  ----------------------------------------------------------
-   *  | /#volumeName                             |  VolumeInfo |
-   *  ----------------------------------------------------------
-   *  | /#volumeName/#bucketName                 |  BucketInfo |
-   *  ----------------------------------------------------------
-   *  | /volumeName/bucketName/keyName           |  KeyInfo    |
-   *  ----------------------------------------------------------
-   *  | #deleting#/volumeName/bucketName/keyName |  KeyInfo    |
-   *  ----------------------------------------------------------
-   */
-  public static final String KSM_VOLUME_PREFIX = "/#";
-  public static final String KSM_BUCKET_PREFIX = "/#";
-  public static final String KSM_KEY_PREFIX = "/";
-  public static final String KSM_USER_PREFIX = "$";
-
-  /**
-   * Max KSM Quota size of 1024 PB.
-   */
-  public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;
-
-  /**
-   * Max number of keys returned per list buckets operation.
-   */
-  public static final int MAX_LISTBUCKETS_SIZE  = 1024;
-
-  /**
-   * Max number of keys returned per list keys operation.
-   */
-  public static final int MAX_LISTKEYS_SIZE  = 1024;
-
-  /**
-   * Max number of volumes returned per list volumes operation.
-   */
-  public static final int MAX_LISTVOLUMES_SIZE = 1024;
-
-  public static final int INVALID_PORT = -1;
-
-
-  // The ServiceListJSONServlet context attribute where KeySpaceManager
-  // instance gets stored.
-  public static final String KSM_CONTEXT_ATTRIBUTE = "ozone.ksm";
-
-  private OzoneConsts() {
-    // Never Constructed
-  }
-}
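
For context, a minimal sketch of how the KSM DB prefixes removed above compose
into LevelDB keys, matching the schema table in the deleted javadoc. The class
and method names below are illustrative only, not part of the deleted
OzoneConsts API.

    // Illustrative only: composes KSM DB keys from the removed prefix constants.
    public final class KsmKeyLayout {
      static String volumeKey(String volume) {
        return "/#" + volume;                             // KSM_VOLUME_PREFIX + volume
      }
      static String bucketKey(String volume, String bucket) {
        return volumeKey(volume) + "/#" + bucket;         // KSM_BUCKET_PREFIX nesting
      }
      static String keyEntry(String volume, String bucket, String key) {
        return "/" + volume + "/" + bucket + "/" + key;   // KSM_KEY_PREFIX layout
      }
      static String deletingEntry(String volume, String bucket, String key) {
        return "#deleting#" + keyEntry(volume, bucket, key); // DELETING_KEY_PREFIX
      }
      public static void main(String[] args) {
        System.out.println(bucketKey("vol1", "bucket1")); // prints /#vol1/#bucket1
      }
    }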

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
deleted file mode 100644
index 39b7bb8..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.OzoneAcl;
-
-import java.util.List;
-
-/**
- * This class encapsulates the arguments that are
- * required for creating a bucket.
- */
-public final class BucketArgs {
-
-  /**
-   * ACL Information.
-   */
-  private List<OzoneAcl> acls;
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean versioning;
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param versioning Bucket version flag.
-   * @param storageType Storage type to be used.
-   * @param acls list of ACLs.
-   */
-  private BucketArgs(Boolean versioning, StorageType storageType,
-                     List<OzoneAcl> acls) {
-    this.acls = acls;
-    this.versioning = versioning;
-    this.storageType = storageType;
-  }
-
-  /**
-   * Returns true if bucket versioning is enabled, else false.
-   * @return isVersionEnabled
-   */
-  public Boolean getVersioning() {
-    return versioning;
-  }
-
-  /**
-   * Returns the type of storage to be used.
-   * @return StorageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns the ACLs associated with this bucket.
-   * @return List<OzoneAcl>
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Returns a new builder that builds a BucketArgs.
-   *
-   * @return Builder
-   */
-  public static BucketArgs.Builder newBuilder() {
-    return new BucketArgs.Builder();
-  }
-
-  /**
-   * Builder for BucketArgs.
-   */
-  public static class Builder {
-    private Boolean versioning;
-    private StorageType storageType;
-    private List<OzoneAcl> acls;
-
-    public BucketArgs.Builder setVersioning(Boolean versionFlag) {
-      this.versioning = versionFlag;
-      return this;
-    }
-
-    public BucketArgs.Builder setStorageType(StorageType storage) {
-      this.storageType = storage;
-      return this;
-    }
-
-    public BucketArgs.Builder setAcls(List<OzoneAcl> listOfAcls) {
-      this.acls = listOfAcls;
-      return this;
-    }
-
-    /**
-     * Constructs the BucketArgs.
-     * @return instance of BucketArgs.
-     */
-    public BucketArgs build() {
-      return new BucketArgs(versioning, storageType, acls);
-    }
-  }
-}
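
For context, a minimal usage sketch of the builder removed above; the empty
ACL list is a placeholder where real code would pass OzoneAcl entries, and the
surrounding class is illustrative, not part of the deleted API.

    import java.util.Collections;
    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.client.BucketArgs;

    final class BucketArgsExample {
      // Builds BucketArgs with versioning enabled, SSD storage, and no ACLs.
      static BucketArgs versionedSsdBucket() {
        return BucketArgs.newBuilder()
            .setVersioning(true)
            .setStorageType(StorageType.SSD)
            .setAcls(Collections.<OzoneAcl>emptyList())
            .build();
      }
    }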

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
deleted file mode 100644
index 1ed41f5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NoSuchElementException;
-
-/**
- * The ObjectStore class is responsible for the client operations that can
- * be performed on the Ozone Object Store.
- */
-public class ObjectStore {
-
-  /**
-   * The proxy used for connecting to the cluster and performing
-   * client operations.
-   */
-  private final ClientProtocol proxy;
-
-  /**
-   * Cache size to be used for listVolume calls.
-   */
-  private int listCacheSize;
-
-  /**
-   * Creates an instance of ObjectStore.
-   * @param conf Configuration object.
-   * @param proxy ClientProtocol proxy.
-   */
-  public ObjectStore(Configuration conf, ClientProtocol proxy) {
-    this.proxy = proxy;
-    this.listCacheSize = OzoneClientUtils.getListCacheSize(conf);
-  }
-
-  /**
-   * Creates the volume with default values.
-   * @param volumeName Name of the volume to be created.
-   * @throws IOException
-   */
-  public void createVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    OzoneClientUtils.verifyResourceName(volumeName);
-    proxy.createVolume(volumeName);
-  }
-
-  /**
-   * Creates the volume.
-   * @param volumeName Name of the volume to be created.
-   * @param volumeArgs Volume properties.
-   * @throws IOException
-   */
-  public void createVolume(String volumeName, VolumeArgs volumeArgs)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(volumeArgs);
-    OzoneClientUtils.verifyResourceName(volumeName);
-    proxy.createVolume(volumeName, volumeArgs);
-  }
-
-  /**
-   * Returns the volume information.
-   * @param volumeName Name of the volume.
-   * @return OzoneVolume
-   * @throws IOException
-   */
-  public OzoneVolume getVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    OzoneClientUtils.verifyResourceName(volumeName);
-    OzoneVolume volume = proxy.getVolumeDetails(volumeName);
-    return volume;
-  }
-
-
-  /**
-   * Returns an Iterator to iterate over all the volumes in the object
-   * store. The result can be restricted using a volume prefix; all
-   * volumes are returned if the prefix is null.
-   *
-   * @param volumePrefix Volume prefix to match
-   * @return {@code Iterator<OzoneVolume>}
-   */
-  public Iterator<OzoneVolume> listVolumes(String volumePrefix)
-      throws IOException {
-    return new VolumeIterator(volumePrefix);
-  }
-
-  /**
-   * Returns an Iterator to iterate over the list of volumes owned by a
-   * specific user. The result can be restricted using a volume prefix;
-   * all volumes are returned if the prefix is null. If user is null,
-   * the volumes of the current user are returned.
-   *
-   * @param user User Name
-   * @param volumePrefix Volume prefix to match
-   * @return {@code Iterator<OzoneVolume>}
-   */
-  public Iterator<OzoneVolume> listVolumes(String user, String volumePrefix)
-      throws IOException {
-    if(Strings.isNullOrEmpty(user)) {
-      user = UserGroupInformation.getCurrentUser().getShortUserName();
-    }
-    return new VolumeIterator(user, volumePrefix);
-  }
-
-  /**
-   * Deletes the volume.
-   * @param volumeName Name of the volume.
-   * @throws IOException
-   */
-  public void deleteVolume(String volumeName) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    OzoneClientUtils.verifyResourceName(volumeName);
-    proxy.deleteVolume(volumeName);
-  }
-
-  /**
-   * An Iterator to iterate over {@link OzoneVolume} list.
-   */
-  private class VolumeIterator implements Iterator<OzoneVolume> {
-
-    private String user = null;
-    private String volPrefix = null;
-
-    private Iterator<OzoneVolume> currentIterator;
-    private OzoneVolume currentValue;
-
-    /**
-     * Creates an Iterator to iterate over all volumes in the cluster,
-     * which match the volume prefix.
-     * @param volPrefix prefix to match
-     */
-    VolumeIterator(String volPrefix) {
-      this(null, volPrefix);
-    }
-
-    /**
-     * Creates an Iterator to iterate over all volumes of the user,
-     * which match the volume prefix.
-     * @param user user name
-     * @param volPrefix volume prefix to match
-     */
-    VolumeIterator(String user, String volPrefix) {
-      this.user = user;
-      this.volPrefix = volPrefix;
-      this.currentValue = null;
-      this.currentIterator = getNextListOfVolumes(null).iterator();
-    }
-
-    @Override
-    public boolean hasNext() {
-      if(!currentIterator.hasNext()) {
-        currentIterator = getNextListOfVolumes(
-            currentValue != null ? currentValue.getName() : null)
-            .iterator();
-      }
-      return currentIterator.hasNext();
-    }
-
-    @Override
-    public OzoneVolume next() {
-      if(hasNext()) {
-        currentValue = currentIterator.next();
-        return currentValue;
-      }
-      throw new NoSuchElementException();
-    }
-
-    /**
-     * Returns the next batch of volumes using the proxy.
-     * @param prevVolume previous volume, this will be excluded from the result
-     * @return {@code List<OzoneVolume>}
-     */
-    private List<OzoneVolume> getNextListOfVolumes(String prevVolume) {
-      try {
-        // if user is null, list all the volumes.
-        if(user != null) {
-          return proxy.listVolumes(user, volPrefix, prevVolume, listCacheSize);
-        }
-        return proxy.listVolumes(volPrefix, prevVolume, listCacheSize);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-
-}
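
For context, a short sketch of consuming the paged VolumeIterator removed
above; it assumes an already-constructed ObjectStore, and the prefix value is
illustrative.

    // Prints every volume whose name starts with "vol"; the iterator pages
    // through results listCacheSize entries at a time via the proxy.
    static void printVolumes(ObjectStore store) throws IOException {
      Iterator<OzoneVolume> volumes = store.listVolumes("vol");
      while (volumes.hasNext()) {
        System.out.println(volumes.next().getName());
      }
    }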

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
deleted file mode 100644
index b94e0f7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ /dev/null
@@ -1,360 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.OzoneAcl;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NoSuchElementException;
-
-/**
- * A class that represents an Ozone bucket and the client operations on it.
- */
-public class OzoneBucket {
-
-  /**
-   * The proxy used for connecting to the cluster and performing
-   * client operations.
-   */
-  private final ClientProtocol proxy;
-  /**
-   * Name of the volume to which the bucket belongs.
-   */
-  private final String volumeName;
-  /**
-   * Name of the bucket.
-   */
-  private final String name;
-  /**
-   * Default replication factor to be used while creating keys.
-   */
-  private final ReplicationFactor defaultReplication;
-
-  /**
-   * Default replication type to be used while creating keys.
-   */
-  private final ReplicationType defaultReplicationType;
-  /**
-   * Bucket ACLs.
-   */
-  private List<OzoneAcl> acls;
-
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean versioning;
-
-  /**
-   * Cache size to be used for listKey calls.
-   */
-  private int listCacheSize;
-
-  /**
-   * Creation time of the bucket.
-   */
-  private long creationTime;
-
-  /**
-   * Constructs OzoneBucket instance.
-   * @param conf Configuration object.
-   * @param proxy ClientProtocol proxy.
-   * @param volumeName Name of the volume the bucket belongs to.
-   * @param bucketName Name of the bucket.
-   * @param acls ACLs associated with the bucket.
-   * @param storageType StorageType of the bucket.
-   * @param versioning versioning status of the bucket.
-   * @param creationTime creation time of the bucket.
-   */
-  public OzoneBucket(Configuration conf, ClientProtocol proxy,
-                     String volumeName, String bucketName,
-                     List<OzoneAcl> acls, StorageType storageType,
-                     Boolean versioning, long creationTime) {
-    this.proxy = proxy;
-    this.volumeName = volumeName;
-    this.name = bucketName;
-    this.acls = acls;
-    this.storageType = storageType;
-    this.versioning = versioning;
-    this.listCacheSize = OzoneClientUtils.getListCacheSize(conf);
-    this.creationTime = creationTime;
-    this.defaultReplication = ReplicationFactor.valueOf(conf.getInt(
-        OzoneConfigKeys.OZONE_REPLICATION,
-        OzoneConfigKeys.OZONE_REPLICATION_DEFAULT));
-    this.defaultReplicationType = ReplicationType.valueOf(conf.get(
-        OzoneConfigKeys.OZONE_REPLICATION_TYPE,
-        OzoneConfigKeys.OZONE_REPLICATION_TYPE_DEFAULT));
-  }
-
-  /**
-   * Returns Volume Name.
-   *
-   * @return volumeName
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns Bucket Name.
-   *
-   * @return bucketName
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns the ACLs associated with the Bucket.
-   *
-   * @return acls
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Returns StorageType of the Bucket.
-   *
-   * @return storageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns Versioning associated with the Bucket.
-   *
-   * @return versioning
-   */
-  public Boolean getVersioning() {
-    return versioning;
-  }
-
-  /**
-   * Returns creation time of the Bucket.
-   *
-   * @return creation time of the bucket
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Adds ACLs to the Bucket.
-   * @param addAcls ACLs to be added
-   * @throws IOException
-   */
-  public void addAcls(List<OzoneAcl> addAcls) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(addAcls);
-    proxy.addBucketAcls(volumeName, name, addAcls);
-    addAcls.stream().filter(acl -> !acls.contains(acl)).forEach(
-        acls::add);
-  }
-
-  /**
-   * Removes ACLs from the bucket.
-   * @param removeAcls ACLs to be removed
-   * @throws IOException
-   */
-  public void removeAcls(List<OzoneAcl> removeAcls) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(removeAcls);
-    proxy.removeBucketAcls(volumeName, name, removeAcls);
-    acls.removeAll(removeAcls);
-  }
-
-  /**
-   * Sets/Changes the storage type of the bucket.
-   * @param newStorageType Storage type to be set
-   * @throws IOException
-   */
-  public void setStorageType(StorageType newStorageType) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(newStorageType);
-    proxy.setBucketStorageType(volumeName, name, newStorageType);
-    storageType = newStorageType;
-  }
-
-  /**
-   * Enables/Disables versioning of the bucket.
-   * @param newVersioning versioning flag to be set
-   * @throws IOException
-   */
-  public void setVersioning(Boolean newVersioning) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(newVersioning);
-    proxy.setBucketVersioning(volumeName, name, newVersioning);
-    versioning = newVersioning;
-  }
-
-  /**
-   * Creates a new key in the bucket, using the configured default
-   * replication type and factor (RATIS and THREE by default).
-   * @param key Name of the key to be created.
-   * @param size Size of the data the key will point to.
-   * @return OzoneOutputStream to which the data has to be written.
-   * @throws IOException
-   */
-  public OzoneOutputStream createKey(String key, long size)
-      throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
-    return createKey(key, size, defaultReplicationType, defaultReplication);
-  }
-
-  /**
-   * Creates a new key in the bucket.
-   * @param key Name of the key to be created.
-   * @param size Size of the data the key will point to.
-   * @param type Replication type to be used.
-   * @param factor Replication factor of the key.
-   * @return OzoneOutputStream to which the data has to be written.
-   * @throws IOException
-   */
-  public OzoneOutputStream createKey(String key, long size,
-                                     ReplicationType type,
-                                     ReplicationFactor factor)
-      throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
-    Preconditions.checkNotNull(type);
-    Preconditions.checkNotNull(factor);
-    return proxy.createKey(volumeName, name, key, size, type, factor);
-  }
-
-  /**
-   * Reads an existing key from the bucket.
-   * @param key Name of the key to be read.
-   * @return OzoneInputStream the stream from which the data can be read.
-   * @throws IOException
-   */
-  public OzoneInputStream readKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
-    return proxy.getKey(volumeName, name, key);
-  }
-
-  /**
-   * Returns information about the key.
-   * @param key Name of the key.
-   * @return OzoneKey Information about the key.
-   * @throws IOException
-   */
-  public OzoneKey getKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
-    return proxy.getKeyDetails(volumeName, name, key);
-  }
-
-  /**
-   * Returns an Iterator to iterate over all keys in the bucket.
-   * The result can be restricted using a key prefix; all keys are
-   * returned if the prefix is null.
-   *
-   * @param keyPrefix Key prefix to match
-   * @return {@code Iterator<OzoneKey>}
-   */
-  public Iterator<OzoneKey> listKeys(String keyPrefix) {
-    return new KeyIterator(keyPrefix);
-  }
-
-  /**
-   * Deletes key from the bucket.
-   * @param key Name of the key to be deleted.
-   * @throws IOException
-   */
-  public void deleteKey(String key) throws IOException {
-    Preconditions.checkNotNull(proxy, "Client proxy is not set.");
-    Preconditions.checkNotNull(key);
-    proxy.deleteKey(volumeName, name, key);
-  }
-
-  /**
-   * An Iterator to iterate over {@link OzoneKey} list.
-   */
-  private class KeyIterator implements Iterator<OzoneKey> {
-
-    private String keyPrefix = null;
-
-    private Iterator<OzoneKey> currentIterator;
-    private OzoneKey currentValue;
-
-
-    /**
-     * Creates an Iterator to iterate over all keys in the bucket
-     * which match the key prefix.
-     * @param keyPrefix key prefix to match
-     */
-    KeyIterator(String keyPrefix) {
-      this.keyPrefix = keyPrefix;
-      this.currentValue = null;
-      this.currentIterator = getNextListOfKeys(null).iterator();
-    }
-
-    @Override
-    public boolean hasNext() {
-      if(!currentIterator.hasNext()) {
-        currentIterator = getNextListOfKeys(
-            currentValue != null ? currentValue.getName() : null)
-            .iterator();
-      }
-      return currentIterator.hasNext();
-    }
-
-    @Override
-    public OzoneKey next() {
-      if(hasNext()) {
-        currentValue = currentIterator.next();
-        return currentValue;
-      }
-      throw new NoSuchElementException();
-    }
-
-    /**
-     * Gets the next batch of keys using the proxy.
-     * @param prevKey previous key; it will be excluded from the result
-     * @return {@code List<OzoneKey>}
-     */
-    private List<OzoneKey> getNextListOfKeys(String prevKey) {
-      try {
-        return proxy.listKeys(volumeName, name, keyPrefix, prevKey,
-            listCacheSize);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-  }
-}
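
For context, a write/read round trip against the bucket API removed above;
the bucket instance and key name are assumed, and the try-with-resources
usage presumes the Ozone streams are Closeable, which this diff does not show.

    // Writes a small value under "key1" and reads it back; the configured
    // default replication type and factor apply to the new key.
    static void roundTrip(OzoneBucket bucket) throws IOException {
      byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
      try (OzoneOutputStream out = bucket.createKey("key1", data.length)) {
        out.write(data);
      }
      try (OzoneInputStream in = bucket.readKey("key1")) {
        byte[] buf = new byte[data.length];
        int read = in.read(buf); // expected to return data.length here
      }
    }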

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce23d9ad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
deleted file mode 100644
index f191507..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-
-import java.io.Closeable;
-import java.io.IOException;
-
-/**
- * OzoneClient connects to an Ozone cluster and
- * performs basic operations.
- */
-public class OzoneClient implements Closeable {
-
-  /*
-   * OzoneClient connects to an Ozone cluster and
-   * performs basic operations.
-   *
-   * +-------------+     +---+   +-------------------------------------+
-   * | OzoneClient | --> | C |   | Object Store                        |
-   * |_____________|     | l |   |  +-------------------------------+  |
-   *                     | i |   |  | Volume(s)                     |  |
-   *                     | e |   |  |   +------------------------+  |  |
-   *                     | n |   |  |   | Bucket(s)              |  |  |
-   *                     | t |   |  |   |   +------------------+ |  |  |
-   *                     |   |   |  |   |   | Key -> Value (s) | |  |  |
-   *                     | P |-->|  |   |   |                  | |  |  |
-   *                     | r |   |  |   |   |__________________| |  |  |
-   *                     | o |   |  |   |                        |  |  |
-   *                     | t |   |  |   |________________________|  |  |
-   *                     | o |   |  |                               |  |
-   *                     | c |   |  |_______________________________|  |
-   *                     | o |   |                                     |
-   *                     | l |   |_____________________________________|
-   *                     |___|
-   * Example:
-   * ObjectStore store = client.getObjectStore();
-   * store.createVolume("volumeOne", volumeArgs);
-   * OzoneVolume volume = store.getVolume("volumeOne");
-   * volume.setQuota("10 GB");
-   * volume.createBucket("bucketOne", bucketArgs);
-   * OzoneBucket bucket = volume.getBucket("bucketOne"); // accessor assumed
-   * bucket.setVersioning(true);
-   * OzoneOutputStream os = bucket.createKey("keyOne", 1024);
-   * os.write(data);
-   * os.close();
-   * OzoneInputStream is = bucket.readKey("keyOne");
-   * is.read(buffer);
-   * is.close();
-   * bucket.deleteKey("keyOne");
-   * volume.deleteBucket("bucketOne");
-   * store.deleteVolume("volumeOne");
-   * client.close();
-   */
-
-  private final ClientProtocol proxy;
-  private final ObjectStore objectStore;
-
-  /**
-   * Creates a new OzoneClient object, generally constructed
-   * using {@link OzoneClientFactory}.
-   * @param conf Configuration object
-   * @param proxy ClientProtocol proxy instance
-   */
-  public OzoneClient(Configuration conf, ClientProtocol proxy) {
-    this.proxy = proxy;
-    this.objectStore = new ObjectStore(conf, this.proxy);
-  }
-
-  /**
-   * Returns the object store associated with the Ozone Cluster.
-   * @return ObjectStore
-   */
-  public ObjectStore getObjectStore() {
-    return objectStore;
-  }
-
-  /**
-   * Closes the client and all the underlying resources.
-   * @throws IOException
-   */
-  @Override
-  public void close() throws IOException {
-    proxy.close();
-  }
-}
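
For context, a lifecycle sketch for the client removed above. The javadoc
references OzoneClientFactory, but the factory-method name used here is an
assumption, not confirmed by this diff.

    // Obtain a client (factory method name assumed), use the store, then close.
    static void createVolumeAndClose(Configuration conf) throws IOException {
      OzoneClient client = OzoneClientFactory.getClient(conf);
      try {
        ObjectStore store = client.getObjectStore();
        store.createVolume("volume1");
      } finally {
        client.close(); // releases the underlying ClientProtocol proxy
      }
    }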

