http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
deleted file mode 100644
index 837c95b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
-    .ScmBlockLocationProtocolService;
-import org.apache.hadoop.ipc.ProtocolInfo;
-
-/**
- * Protocol used from an HDFS node to StorageContainerManager.  This extends the
- * Protocol Buffers service interface to add Hadoop-specific annotations.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.ScmBlockLocationProtocol",
-    protocolVersion = 1)
-@InterfaceAudience.Private
-public interface ScmBlockLocationProtocolPB
-    extends ScmBlockLocationProtocolService.BlockingInterface {
-}
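
For context, a blocking PB interface like the one deleted above is only usable once it has been registered with Hadoop's ProtobufRpcEngine and wrapped in an RPC proxy. A minimal sketch of that binding follows; the helper class, SCM address handling, and use of the current user are illustrative assumptions, not part of this commit.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical helper: binds the protocol to the protobuf RPC engine and
// opens a blocking proxy against the given SCM address.
public final class ScmBlockProxyExample {

  private ScmBlockProxyExample() {
  }

  public static ScmBlockLocationProtocolPB createProxy(
      Configuration conf, InetSocketAddress scmAddress) throws IOException {
    // Protobuf-based protocols must be registered with ProtobufRpcEngine
    // before RPC.getProxy can serve them. The protocol version is read from
    // the @ProtocolInfo annotation on the interface.
    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
        ProtobufRpcEngine.class);
    return RPC.getProxy(ScmBlockLocationProtocolPB.class,
        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class),
        scmAddress, UserGroupInformation.getCurrentUser(), conf,
        NetUtils.getDefaultSocketFactory(conf));
  }
}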

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
deleted file mode 100644
index 16819e9..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,371 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import com.google.common.base.Preconditions;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitChillModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitChillModeResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InChillModeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InChillModeResponseProto;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.GetContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.PipelineResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * This class is the client-side translator to translate the requests made on
- * the {@link StorageContainerLocationProtocol} interface to the RPC server
- * implementing {@link StorageContainerLocationProtocolPB}.
- */
-@InterfaceAudience.Private
-public final class StorageContainerLocationProtocolClientSideTranslatorPB
-    implements StorageContainerLocationProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-
-  private final StorageContainerLocationProtocolPB rpcProxy;
-
-  /**
-   * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB.
-   *
-   * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy
-   */
-  public StorageContainerLocationProtocolClientSideTranslatorPB(
-      StorageContainerLocationProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Asks SCM where a container should be allocated. SCM responds with the set
-   * of datanodes that should be used creating this container. Ozone/SCM only
-   * supports replication factor of either 1 or 3.
-   * @param type - Replication Type
-   * @param factor - Replication Count
-   * @return
-   * @throws IOException
-   */
-  @Override
-  public ContainerWithPipeline allocateContainer(
-      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
-      String owner) throws IOException {
-
-    ContainerRequestProto request = ContainerRequestProto.newBuilder()
-        .setReplicationFactor(factor)
-        .setReplicationType(type)
-        .setOwner(owner)
-        .build();
-
-    final ContainerResponseProto response;
-    try {
-      response = rpcProxy.allocateContainer(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (response.getErrorCode() != ContainerResponseProto.Error.success) {
-      throw new IOException(response.hasErrorMessage() ?
-          response.getErrorMessage() : "Allocate container failed.");
-    }
-    return ContainerWithPipeline.fromProtobuf(
-        response.getContainerWithPipeline());
-  }
-
-  public ContainerInfo getContainer(long containerID) throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    GetContainerRequestProto request = GetContainerRequestProto
-        .newBuilder()
-        .setContainerID(containerID)
-        .build();
-    try {
-      GetContainerResponseProto response =
-          rpcProxy.getContainer(NULL_RPC_CONTROLLER, request);
-      return ContainerInfo.fromProtobuf(response.getContainerInfo());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public ContainerWithPipeline getContainerWithPipeline(long containerID)
-      throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    GetContainerWithPipelineRequestProto request =
-        GetContainerWithPipelineRequestProto.newBuilder()
-            .setContainerID(containerID).build();
-    try {
-      GetContainerWithPipelineResponseProto response =
-          rpcProxy.getContainerWithPipeline(NULL_RPC_CONTROLLER, request);
-      return ContainerWithPipeline.fromProtobuf(
-          response.getContainerWithPipeline());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<ContainerInfo> listContainer(long startContainerID, int count)
-      throws IOException {
-    Preconditions.checkState(startContainerID >= 0,
-        "Container ID cannot be negative.");
-    Preconditions.checkState(count > 0,
-        "Container count must be greater than 0.");
-    SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto
-        .newBuilder();
-    builder.setStartContainerID(startContainerID);
-    builder.setCount(count);
-    SCMListContainerRequestProto request = builder.build();
-
-    try {
-      SCMListContainerResponseProto response =
-          rpcProxy.listContainer(NULL_RPC_CONTROLLER, request);
-      List<ContainerInfo> containerList = new ArrayList<>();
-      for (HddsProtos.SCMContainerInfo containerInfoProto : response
-          .getContainersList()) {
-        containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
-      }
-      return containerList;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * Ask SCM to delete a container by name. SCM will remove
-   * the container mapping in its database.
-   *
-   * @param containerID
-   * @throws IOException
-   */
-  @Override
-  public void deleteContainer(long containerID)
-      throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
-    SCMDeleteContainerRequestProto request = SCMDeleteContainerRequestProto
-        .newBuilder()
-        .setContainerID(containerID)
-        .build();
-    try {
-      rpcProxy.deleteContainer(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * Queries a list of Node Statuses.
-   *
-   * @param nodeStatuses
-   * @return List of Datanodes.
-   */
-  @Override
-  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
-      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
-      throws IOException {
-    // TODO : We support only cluster wide query right now. So ignoring checking
-    // queryScope and poolName
-    Preconditions.checkNotNull(nodeStatuses);
-    NodeQueryRequestProto request = NodeQueryRequestProto.newBuilder()
-        .setState(nodeStatuses)
-        .setScope(queryScope).setPoolName(poolName).build();
-    try {
-      NodeQueryResponseProto response =
-          rpcProxy.queryNode(NULL_RPC_CONTROLLER, request);
-      return response.getDatanodesList();
-    } catch (ServiceException e) {
-      throw  ProtobufHelper.getRemoteException(e);
-    }
-
-  }
-
-  /**
-   * Notify from client that creates object on datanodes.
-   * @param type object type
-   * @param id object id
-   * @param op operation type (e.g., create, close, delete)
-   * @param stage object creation stage : begin/complete
-   */
-  @Override
-  public void notifyObjectStageChange(
-      ObjectStageChangeRequestProto.Type type, long id,
-      ObjectStageChangeRequestProto.Op op,
-      ObjectStageChangeRequestProto.Stage stage) throws IOException {
-    Preconditions.checkState(id >= 0,
-        "Object id cannot be negative.");
-    ObjectStageChangeRequestProto request =
-        ObjectStageChangeRequestProto.newBuilder()
-            .setType(type)
-            .setId(id)
-            .setOp(op)
-            .setStage(stage)
-            .build();
-    try {
-      rpcProxy.notifyObjectStageChange(NULL_RPC_CONTROLLER, request);
-    } catch(ServiceException e){
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * Creates a replication pipeline of a specified type.
-   *
-   * @param replicationType - replication type
-   * @param factor - factor 1 or 3
-   * @param nodePool - optional machine list to build a pipeline.
-   * @throws IOException
-   */
-  @Override
-  public Pipeline createReplicationPipeline(HddsProtos.ReplicationType
-      replicationType, HddsProtos.ReplicationFactor factor, HddsProtos
-      .NodePool nodePool) throws IOException {
-    PipelineRequestProto request = PipelineRequestProto.newBuilder()
-        .setNodePool(nodePool)
-        .setReplicationFactor(factor)
-        .setReplicationType(replicationType)
-        .build();
-    try {
-      PipelineResponseProto response =
-          rpcProxy.allocatePipeline(NULL_RPC_CONTROLLER, request);
-      if (response.getErrorCode() ==
-          PipelineResponseProto.Error.success) {
-        Preconditions.checkState(response.hasPipeline(), "With success, " +
-            "must come a pipeline");
-        return Pipeline.getFromProtoBuf(response.getPipeline());
-      } else {
-        String errorMessage = String.format("create replication pipeline " +
-                "failed. code : %s Message: %s", response.getErrorCode(),
-            response.hasErrorMessage() ? response.getErrorMessage() : "");
-        throw new IOException(errorMessage);
-      }
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
-  public ScmInfo getScmInfo() throws IOException {
-    HddsProtos.GetScmInfoRequestProto request =
-        HddsProtos.GetScmInfoRequestProto.getDefaultInstance();
-    try {
-      HddsProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo(
-          NULL_RPC_CONTROLLER, request);
-      ScmInfo.Builder builder = new ScmInfo.Builder()
-          .setClusterId(resp.getClusterId())
-          .setScmId(resp.getScmId());
-      return builder.build();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-  }
-
-  /**
-   * Check if SCM is in chill mode.
-   *
-   * @return Returns true if SCM is in chill mode else returns false.
-   * @throws IOException
-   */
-  @Override
-  public boolean inChillMode() throws IOException {
-    InChillModeRequestProto request =
-        InChillModeRequestProto.getDefaultInstance();
-    try {
-      InChillModeResponseProto resp = rpcProxy.inChillMode(
-          NULL_RPC_CONTROLLER, request);
-      return resp.getInChillMode();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  /**
-   * Force SCM out of Chill mode.
-   *
-   * @return returns true if operation is successful.
-   * @throws IOException
-   */
-  @Override
-  public boolean forceExitChillMode() throws IOException {
-    ForceExitChillModeRequestProto request =
-        ForceExitChillModeRequestProto.getDefaultInstance();
-    try {
-      ForceExitChillModeResponseProto resp = rpcProxy
-          .forceExitChillMode(NULL_RPC_CONTROLLER, request);
-      return resp.getExitedChillMode();
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-
-  @Override
-  public void close() {
-    RPC.stopProxy(rpcProxy);
-  }
-}
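
For reference, a hypothetical caller of the translator deleted above could look like the sketch below; the class name, owner string, and the getContainerInfo()/getContainerID() accessors are assumptions, and the construction of the underlying RPC proxy is elided.

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;

public final class ScmContainerClientExample {

  private ScmContainerClientExample() {
  }

  // Allocates a RATIS container with replication factor three, then removes
  // its mapping again. The translator is Closeable, so try-with-resources
  // stops the RPC proxy when done.
  public static void allocateAndDelete(
      StorageContainerLocationProtocolPB rpcProxy) throws IOException {
    try (StorageContainerLocationProtocolClientSideTranslatorPB client =
        new StorageContainerLocationProtocolClientSideTranslatorPB(rpcProxy)) {
      ContainerWithPipeline container = client.allocateContainer(
          HddsProtos.ReplicationType.RATIS,
          HddsProtos.ReplicationFactor.THREE, "example-owner");
      long containerID = container.getContainerInfo().getContainerID();
      client.deleteContainer(containerID);
    }
  }
}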

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
deleted file mode 100644
index f234ad3..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerLocationProtocolProtos
-    .StorageContainerLocationProtocolService;
-import org.apache.hadoop.ipc.ProtocolInfo;
-
-/**
- * Protocol used from an HDFS node to StorageContainerManager.  This extends the
- * Protocol Buffers service interface to add Hadoop-specific annotations.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
-    protocolVersion = 1)
-@InterfaceAudience.Private
-public interface StorageContainerLocationProtocolPB
-    extends StorageContainerLocationProtocolService.BlockingInterface {
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
deleted file mode 100644
index 652ae60..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocolPB;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
deleted file mode 100644
index 6b7a328..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .BlockNotCommittedException;
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CloseContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .DatanodeBlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetBlockResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetSmallFileResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutBlockRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutSmallFileRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadContainerResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
-import org.apache.hadoop.hdds.client.BlockID;
-
-import java.io.IOException;
-
-/**
- * Implementation of all container protocol calls performed by Container
- * clients.
- */
-public final class ContainerProtocolCalls  {
-
-  /**
-   * There is no need to instantiate this class.
-   */
-  private ContainerProtocolCalls() {
-  }
-
-  /**
-   * Calls the container protocol to get a container block.
-   *
-   * @param xceiverClient client to perform call
-   * @param datanodeBlockID blockID to identify container
-   * @param traceID container protocol call args
-   * @return container protocol get block response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient,
-      DatanodeBlockID datanodeBlockID, String traceID) throws IOException {
-    GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
-        .newBuilder()
-        .setBlockID(datanodeBlockID);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.GetBlock)
-        .setContainerID(datanodeBlockID.getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setGetBlock(readBlockRequest)
-        .build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-
-    return response.getGetBlock();
-  }
-
-  /**
-   * Calls the container protocol to get the length of a committed block.
-   *
-   * @param xceiverClient client to perform call
-   * @param blockID blockId for the Block
-   * @param traceID container protocol call args
-   * @return container protocol getLastCommittedBlockLength response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static ContainerProtos.GetCommittedBlockLengthResponseProto
-      getCommittedBlockLength(
-          XceiverClientSpi xceiverClient, BlockID blockID, String traceID)
-      throws IOException {
-    ContainerProtos.GetCommittedBlockLengthRequestProto.Builder
-        getBlockLengthRequestBuilder =
-        ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder().
-            setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(Type.GetCommittedBlockLength)
-            .setContainerID(blockID.getContainerID())
-            .setTraceID(traceID)
-            .setDatanodeUuid(id)
-            .setGetCommittedBlockLength(getBlockLengthRequestBuilder).build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-    return response.getGetCommittedBlockLength();
-  }
-
-  /**
-   * Calls the container protocol to put a container block.
-   *
-   * @param xceiverClient client to perform call
-   * @param containerBlockData block data to identify container
-   * @param traceID container protocol call args
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static void putBlock(XceiverClientSpi xceiverClient,
-      BlockData containerBlockData, String traceID) throws IOException {
-    PutBlockRequestProto.Builder createBlockRequest = PutBlockRequestProto
-        .newBuilder()
-        .setBlockData(containerBlockData);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.PutBlock)
-        .setContainerID(containerBlockData.getBlockID().getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setPutBlock(createBlockRequest)
-        .build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-  }
-
-  /**
-   * Calls the container protocol to read a chunk.
-   *
-   * @param xceiverClient client to perform call
-   * @param chunk information about chunk to read
-   * @param blockID ID of the block
-   * @param traceID container protocol call args
-   * @return container protocol read chunk response
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static ReadChunkResponseProto readChunk(XceiverClientSpi xceiverClient,
-        ChunkInfo chunk, BlockID blockID, String traceID) throws IOException {
-    ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .setChunkData(chunk);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.ReadChunk)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setReadChunk(readChunkRequest)
-        .build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-    return response.getReadChunk();
-  }
-
-  /**
-   * Calls the container protocol to write a chunk.
-   *
-   * @param xceiverClient client to perform call
-   * @param chunk information about chunk to write
-   * @param blockID ID of the block
-   * @param data the data of the chunk to write
-   * @param traceID container protocol call args
-   * @throws IOException if there is an I/O error while performing the call
-   */
-  public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk,
-      BlockID blockID, ByteString data, String traceID)
-      throws IOException {
-    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
-        .setChunkData(chunk)
-        .setData(data);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.WriteChunk)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setWriteChunk(writeChunkRequest)
-        .build();
-    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
-    validateContainerResponse(response);
-  }
-
-  /**
-   * Allows writing a small file using single RPC. This takes the container
-   * name, block name and data to write sends all that data to the container
-   * using a single RPC. This API is designed to be used for files which are
-   * smaller than 1 MB.
-   *
-   * @param client - client that communicates with the container.
-   * @param blockID - ID of the block
-   * @param data - Data to be written into the container.
-   * @param traceID - Trace ID for logging purpose.
-   * @throws IOException
-   */
-  public static void writeSmallFile(XceiverClientSpi client,
-      BlockID blockID, byte[] data, String traceID)
-      throws IOException {
-
-    BlockData containerBlockData =
-        BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
-            .build();
-    PutBlockRequestProto.Builder createBlockRequest =
-        PutBlockRequestProto.newBuilder()
-            .setBlockData(containerBlockData);
-
-    KeyValue keyValue =
-        KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
-            .build();
-    ChunkInfo chunk =
-        ChunkInfo.newBuilder().setChunkName(blockID.getLocalID()
-            + "_chunk").setOffset(0).setLen(data.length).
-            addMetadata(keyValue).build();
-
-    PutSmallFileRequestProto putSmallFileRequest =
-        PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)
-            .setBlock(createBlockRequest).setData(ByteString.copyFrom(data))
-            .build();
-
-    String id = client.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto request =
-        ContainerCommandRequestProto.newBuilder()
-            .setCmdType(Type.PutSmallFile)
-            .setContainerID(blockID.getContainerID())
-            .setTraceID(traceID)
-            .setDatanodeUuid(id)
-            .setPutSmallFile(putSmallFileRequest)
-            .build();
-    ContainerCommandResponseProto response = client.sendCommand(request);
-    validateContainerResponse(response);
-  }
-
-  /**
-   * createContainer call that creates a container on the datanode.
-   * @param client  - client
-   * @param containerID - ID of container
-   * @param traceID - traceID
-   * @throws IOException
-   */
-  public static void createContainer(XceiverClientSpi client, long containerID,
-      String traceID) throws IOException {
-    ContainerProtos.CreateContainerRequestProto.Builder createRequest =
-        ContainerProtos.CreateContainerRequestProto
-            .newBuilder();
-    createRequest.setContainerType(ContainerProtos.ContainerType
-        .KeyValueContainer);
-
-    String id = client.getPipeline().getLeader().getUuidString();
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.CreateContainer);
-    request.setContainerID(containerID);
-    request.setCreateContainer(createRequest.build());
-    request.setDatanodeUuid(id);
-    request.setTraceID(traceID);
-    ContainerCommandResponseProto response = client.sendCommand(
-        request.build());
-    validateContainerResponse(response);
-  }
-
-  /**
-   * Deletes a container from a pipeline.
-   *
-   * @param client
-   * @param force whether or not to forcibly delete the container.
-   * @param traceID
-   * @throws IOException
-   */
-  public static void deleteContainer(XceiverClientSpi client, long containerID,
-      boolean force, String traceID) throws IOException {
-    ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest =
-        ContainerProtos.DeleteContainerRequestProto.newBuilder();
-    deleteRequest.setForceDelete(force);
-    String id = client.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteContainer);
-    request.setContainerID(containerID);
-    request.setDeleteContainer(deleteRequest);
-    request.setTraceID(traceID);
-    request.setDatanodeUuid(id);
-    ContainerCommandResponseProto response =
-        client.sendCommand(request.build());
-    validateContainerResponse(response);
-  }
-
-  /**
-   * Close a container.
-   *
-   * @param client
-   * @param containerID
-   * @param traceID
-   * @throws IOException
-   */
-  public static void closeContainer(XceiverClientSpi client,
-      long containerID, String traceID) throws IOException {
-    String id = client.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(Type.CloseContainer);
-    request.setContainerID(containerID);
-    request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance());
-    request.setTraceID(traceID);
-    request.setDatanodeUuid(id);
-    ContainerCommandResponseProto response =
-        client.sendCommand(request.build());
-    validateContainerResponse(response);
-  }
-
-  /**
-   * readContainer call that gets meta data from an existing container.
-   *
-   * @param client - client
-   * @param traceID - trace ID
-   * @throws IOException
-   */
-  public static ReadContainerResponseProto readContainer(
-      XceiverClientSpi client, long containerID,
-      String traceID) throws IOException {
-    String id = client.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto.Builder request =
-        ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(Type.ReadContainer);
-    request.setContainerID(containerID);
-    request.setReadContainer(ReadContainerRequestProto.getDefaultInstance());
-    request.setDatanodeUuid(id);
-    request.setTraceID(traceID);
-    ContainerCommandResponseProto response =
-        client.sendCommand(request.build());
-    validateContainerResponse(response);
-
-    return response.getReadContainer();
-  }
-
-  /**
-   * Reads the data given the blockID.
-   *
-   * @param client
-   * @param blockID - ID of the block
-   * @param traceID - trace ID
-   * @return GetSmallFileResponseProto
-   * @throws IOException
-   */
-  public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client,
-      BlockID blockID, String traceID) throws IOException {
-    GetBlockRequestProto.Builder getBlock = GetBlockRequestProto
-        .newBuilder()
-        .setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    ContainerProtos.GetSmallFileRequestProto getSmallFileRequest =
-        GetSmallFileRequestProto
-            .newBuilder().setBlock(getBlock)
-            .build();
-    String id = client.getPipeline().getLeader().getUuidString();
-
-    ContainerCommandRequestProto request = ContainerCommandRequestProto
-        .newBuilder()
-        .setCmdType(Type.GetSmallFile)
-        .setContainerID(blockID.getContainerID())
-        .setTraceID(traceID)
-        .setDatanodeUuid(id)
-        .setGetSmallFile(getSmallFileRequest)
-        .build();
-    ContainerCommandResponseProto response = client.sendCommand(request);
-    validateContainerResponse(response);
-
-    return response.getGetSmallFile();
-  }
-
-  /**
-   * Validates a response from a container protocol call.  Any non-successful
-   * return code is mapped to a corresponding exception and thrown.
-   *
-   * @param response container protocol call response
-   * @throws IOException if the container protocol call failed
-   */
-  private static void validateContainerResponse(
-      ContainerCommandResponseProto response
-  ) throws StorageContainerException {
-    if (response.getResult() == ContainerProtos.Result.SUCCESS) {
-      return;
-    } else if (response.getResult()
-        == ContainerProtos.Result.BLOCK_NOT_COMMITTED) {
-      throw new BlockNotCommittedException(response.getMessage());
-    }
-    throw new StorageContainerException(
-        response.getMessage(), response.getResult());
-  }
-}
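
As an illustration of the deleted helpers, a hypothetical small-file round trip is sketched below; the class name and payload are made up, and the XceiverClientSpi, BlockID, and trace ID are assumed to come from the caller.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetSmallFileResponseProto;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;

public final class SmallFileExample {

  private SmallFileExample() {
  }

  // Writes a payload well under the 1 MB guideline in a single RPC, then
  // reads it back. Non-success results surface as StorageContainerException
  // (or BlockNotCommittedException) out of validateContainerResponse.
  public static GetSmallFileResponseProto roundTrip(XceiverClientSpi client,
      BlockID blockID, String traceID) throws IOException {
    byte[] payload = "hello ozone".getBytes(StandardCharsets.UTF_8);
    ContainerProtocolCalls.writeSmallFile(client, blockID, payload, traceID);
    return ContainerProtocolCalls.readSmallFile(client, blockID, traceID);
  }
}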

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
deleted file mode 100644
index 8e98158..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.storage;
-
-/**
- * This package contains StorageContainerManager classes.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
deleted file mode 100644
index 1827b23..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-package org.apache.hadoop.ozone;
-
-import java.util.Objects;
-
-/**
- * OzoneACL classes define bucket ACLs used in OZONE.
- *
- * ACLs in Ozone follow this pattern.
- * <ul>
- * <li>user:name:rw
- * <li>group:name:rw
- * <li>world::rw
- * </ul>
- */
-public class OzoneAcl {
-  private OzoneACLType type;
-  private String name;
-  private OzoneACLRights rights;
-
-  /**
-   * Constructor for OzoneAcl.
-   */
-  public OzoneAcl() {
-  }
-
-  /**
-   * Constructor for OzoneAcl.
-   *
-   * @param type - Type
-   * @param name - Name of user
-   * @param rights - Rights
-   */
-  public OzoneAcl(OzoneACLType type, String name, OzoneACLRights rights) {
-    this.name = name;
-    this.rights = rights;
-    this.type = type;
-    if (type == OzoneACLType.WORLD && name.length() != 0) {
-      throw new IllegalArgumentException("Unexpected name part in world type");
-    }
-    if (((type == OzoneACLType.USER) || (type == OzoneACLType.GROUP))
-        && (name.length() == 0)) {
-      throw new IllegalArgumentException("User or group name is required");
-    }
-  }
-
-  /**
-   * Parses an ACL string and returns the ACL object.
-   *
-   * @param acl - Acl String , Ex. user:anu:rw
-   *
-   * @return - Ozone ACLs
-   */
-  public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException {
-    if ((acl == null) || acl.isEmpty()) {
-      throw new IllegalArgumentException("ACLs cannot be null or empty");
-    }
-    String[] parts = acl.trim().split(":");
-    if (parts.length < 3) {
-      throw new IllegalArgumentException("ACLs are not in expected format");
-    }
-
-    OzoneACLType aclType = OzoneACLType.valueOf(parts[0].toUpperCase());
-    OzoneACLRights rights = OzoneACLRights.getACLRight(parts[2].toLowerCase());
-
-    // TODO : Support sanitation of these user names by calling into
-    // userAuth Interface.
-    return new OzoneAcl(aclType, parts[1], rights);
-  }
-
-  @Override
-  public String toString() {
-    return type + ":" + name + ":" + OzoneACLRights.getACLRightsString(rights);
-  }
-
-  /**
-   * Returns a hash code value for the object. This method is
-   * supported for the benefit of hash tables.
-   *
-   * @return a hash code value for this object.
-   *
-   * @see Object#equals(Object)
-   * @see System#identityHashCode
-   */
-  @Override
-  public int hashCode() {
-    return Objects.hash(this.getName(), this.getRights().toString(),
-                        this.getType().toString());
-  }
-
-  /**
-   * Returns name.
-   *
-   * @return name
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Returns Rights.
-   *
-   * @return - Rights
-   */
-  public OzoneACLRights getRights() {
-    return rights;
-  }
-
-  /**
-   * Returns Type.
-   *
-   * @return type
-   */
-  public OzoneACLType getType() {
-    return type;
-  }
-
-  /**
-   * Indicates whether some other object is "equal to" this one.
-   *
-   * @param obj the reference object with which to compare.
-   *
-   * @return {@code true} if this object is the same as the obj
-   * argument; {@code false} otherwise.
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    OzoneAcl otherAcl = (OzoneAcl) obj;
-    return otherAcl.getName().equals(this.getName()) &&
-        otherAcl.getRights() == this.getRights() &&
-        otherAcl.getType() == this.getType();
-  }
-
-  /**
-   * ACL types.
-   */
-  public enum OzoneACLType {
-    USER(OzoneConsts.OZONE_ACL_USER_TYPE),
-    GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE),
-    WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE);
-
-    /**
-     * String value for this Enum.
-     */
-    private final String value;
-
-    /**
-     * Init OzoneACLtypes enum.
-     *
-     * @param val String type for this enum.
-     */
-    OzoneACLType(String val) {
-      value = val;
-    }
-  }
-
-  /**
-   * ACL rights.
-   */
-  public enum OzoneACLRights {
-    READ, WRITE, READ_WRITE;
-
-    /**
-     * Returns the ACL rights based on passed in String.
-     *
-     * @param type ACL right string
-     *
-     * @return OzoneACLRights
-     */
-    public static OzoneACLRights getACLRight(String type) {
-      if (type == null || type.isEmpty()) {
-        throw new IllegalArgumentException("ACL right cannot be empty");
-      }
-
-      switch (type) {
-      case OzoneConsts.OZONE_ACL_READ:
-        return OzoneACLRights.READ;
-      case OzoneConsts.OZONE_ACL_WRITE:
-        return OzoneACLRights.WRITE;
-      case OzoneConsts.OZONE_ACL_READ_WRITE:
-      case OzoneConsts.OZONE_ACL_WRITE_READ:
-        return OzoneACLRights.READ_WRITE;
-      default:
-        throw new IllegalArgumentException("ACL right is not recognized");
-      }
-
-    }
-
-    /**
-     * Returns String representation of ACL rights.
-     * @param acl OzoneACLRights
-     * @return String representation of acl
-     */
-    public static String getACLRightsString(OzoneACLRights acl) {
-      switch(acl) {
-      case READ:
-        return OzoneConsts.OZONE_ACL_READ;
-      case WRITE:
-        return OzoneConsts.OZONE_ACL_WRITE;
-      case READ_WRITE:
-        return OzoneConsts.OZONE_ACL_READ_WRITE;
-      default:
-        throw new IllegalArgumentException("ACL right is not recognized");
-      }
-    }
-
-  }
-
-}
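
A short sketch of the parse/format contract of the deleted class; it assumes the OzoneConsts rights strings are "r", "w", "rw", and "wr", which is implied by the "user:anu:rw" example in the javadoc but not shown in this diff.

import org.apache.hadoop.ozone.OzoneAcl;

public final class OzoneAclExample {

  public static void main(String[] args) {
    // Parse the canonical type:name:rights form from the class javadoc.
    OzoneAcl acl = OzoneAcl.parseAcl("user:anu:rw");
    System.out.println(acl.getType());   // USER
    System.out.println(acl.getName());   // anu
    System.out.println(acl.getRights()); // READ_WRITE ("rw" and "wr" both map here)
    System.out.println(acl);             // "USER:anu:rw" (enum name for the type)
  }
}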

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
deleted file mode 100644
index 599b4e8..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-
-import org.apache.ratis.shaded.proto.RaftProtos.ReplicationLevel;
-import org.apache.ratis.util.TimeDuration;
-
-/**
- * This class contains constants for configuration keys used in Ozone.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public final class OzoneConfigKeys {
-  public static final String OZONE_TAGS_SYSTEM_KEY =
-      "ozone.tags.system";
-  public static final String DFS_CONTAINER_IPC_PORT =
-      "dfs.container.ipc";
-  public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
-
-  /**
-   *
-   * When set to true, allocate a random free port for ozone container,
-   * so that a mini cluster is able to launch multiple containers on a node.
-   *
-   * When set to false (default), container port is fixed as specified by
-   * DFS_CONTAINER_IPC_PORT_DEFAULT.
-   */
-  public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
-      "dfs.container.ipc.random.port";
-  public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
-      false;
-
-  /**
-   * Ratis Port where containers listen to.
-   */
-  public static final String DFS_CONTAINER_RATIS_IPC_PORT =
-      "dfs.container.ratis.ipc";
-  public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
-
-  /**
-   * When set to true, allocate a random free port for ozone container, so that
-   * a mini cluster is able to launch multiple containers on a node.
-   */
-  public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
-      "dfs.container.ratis.ipc.random.port";
-  public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
-      false;
-  public static final String OZONE_ENABLED =
-      "ozone.enabled";
-  public static final boolean OZONE_ENABLED_DEFAULT = false;
-  public static final String OZONE_TRACE_ENABLED_KEY =
-      "ozone.trace.enabled";
-  public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
-
-  public static final String OZONE_METADATA_DIRS =
-      "ozone.metadata.dirs";
-
-  public static final String OZONE_METADATA_STORE_IMPL =
-      "ozone.metastore.impl";
-  public static final String OZONE_METADATA_STORE_IMPL_LEVELDB =
-      "LevelDB";
-  public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB =
-      "RocksDB";
-  public static final String OZONE_METADATA_STORE_IMPL_DEFAULT =
-      OZONE_METADATA_STORE_IMPL_ROCKSDB;
-
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
-      "ozone.metastore.rocksdb.statistics";
-
-  public static final String  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT =
-      "ALL";
-  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF =
-      "OFF";
-
-  public static final String OZONE_CONTAINER_CACHE_SIZE =
-      "ozone.container.cache.size";
-  public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024;
-
-  public static final String OZONE_SCM_BLOCK_SIZE_IN_MB =
-      "ozone.scm.block.size.in.mb";
-  public static final long OZONE_SCM_BLOCK_SIZE_DEFAULT = 256;
-
-  /**
-   * Ozone administrator users delimited by comma.
-   * If not set, only the user who launches an ozone service will be the
-   * admin user. This property must be set if ozone services are started by
-   * different users. Otherwise the RPC layer will reject calls from
-   * other servers which are started by users not in the list.
-   * */
-  public static final String OZONE_ADMINISTRATORS =
-      "ozone.administrators";
-
-  public static final String OZONE_CLIENT_PROTOCOL =
-      "ozone.client.protocol";
-
-  // This defines the overall connection limit for the connection pool used in
-  // RestClient.
-  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX =
-      "ozone.rest.client.http.connection.max";
-  public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100;
-
-  // This defines the connection limit per one HTTP route/host.
-  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX =
-      "ozone.rest.client.http.connection.per-route.max";
-
-  public static final int
-      OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
-
-  public static final String OZONE_CLIENT_SOCKET_TIMEOUT =
-      "ozone.client.socket.timeout";
-  public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000;
-  public static final String OZONE_CLIENT_CONNECTION_TIMEOUT =
-      "ozone.client.connection.timeout";
-  public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000;
-
-  public static final String OZONE_REPLICATION = "ozone.replication";
-  public static final int OZONE_REPLICATION_DEFAULT =
-      ReplicationFactor.THREE.getValue();
-
-  public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type";
-  public static final String OZONE_REPLICATION_TYPE_DEFAULT =
-      ReplicationType.RATIS.toString();
-
-  /**
-   * Configuration property to configure the cache size of client list calls.
-   */
-  public static final String OZONE_CLIENT_LIST_CACHE_SIZE =
-      "ozone.client.list.cache";
-  public static final int OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT = 1000;
-
-  /**
-   * Configuration properties for Ozone Block Deleting Service.
-   */
-  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL =
-      "ozone.block.deleting.service.interval";
-  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT
-      = "60s";
-
-  /**
-   * The interval of open key clean service.
-   */
-  public static final String OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS =
-      "ozone.open.key.cleanup.service.interval.seconds";
-  public static final int
-      OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT
-      = 24 * 3600; // a total of 24 hour
-
-  /**
-   * An open key gets cleaned up when it is being in open state for too long.
-   */
-  public static final String OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS =
-      "ozone.open.key.expire.threshold";
-  public static final int OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT =
-      24 * 3600;
-
-  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT =
-      "ozone.block.deleting.service.timeout";
-  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
-      = "300s"; // 300s for default
-
-  public static final String OZONE_KEY_PREALLOCATION_MAXSIZE =
-      "ozone.key.preallocation.maxsize";
-  public static final long OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT
-      = 128 * OzoneConsts.MB;
-
-  public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER =
-      "ozone.block.deleting.limit.per.task";
-  public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT
-      = 1000;
-
-  public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL
-      = "ozone.block.deleting.container.limit.per.interval";
-  public static final int
-      OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
-
-  public static final String OZONE_CLIENT_MAX_RETRIES =
-      "ozone.client.max.retries";
-  public static final int OZONE_CLIENT_MAX_RETRIES_DEFAULT = 50;
-
-  public static final String OZONE_CLIENT_RETRY_INTERVAL =
-      "ozone.client.retry.interval";
-  public static final String OZONE_CLIENT_RETRY_INTERVAL_DEFAULT = "200ms";
-
-  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY;
-  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY;
-  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY;
-  public static final ReplicationLevel
-      DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
-  public static final int DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
-      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
-  public static final int DFS_CONTAINER_CHUNK_MAX_SIZE
-      = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
-  public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
-      "dfs.container.ratis.datanode.storage.dir";
-  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT;
-  public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY;
-  public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT;
-  public static final String DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY;
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_DEFAULT;
-  public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT;
-  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT;
-  public static final String
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT;
-
-  public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT;
-
-  public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
-      "ozone.web.authentication.kerberos.principal";
-
-  public static final String HDDS_DATANODE_PLUGINS_KEY =
-      "hdds.datanode.plugins";
-
-  public static final String
-      HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD =
-      "hdds.datanode.storage.utilization.warning.threshold";
-  public static final double
-      HDDS_DATANODE_STORAGE_UTILIZATION_WARNING_THRESHOLD_DEFAULT = 0.75;
-  public static final String
-      HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD =
-      "hdds.datanode.storage.utilization.critical.threshold";
-  public static final double
-      HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.95;
-
-  public static final String
-      HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY =
-      "hdds.write.lock.reporting.threshold.ms";
-  public static final long
-      HDDS_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
-  public static final String
-      HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_KEY =
-      "hdds.lock.suppress.warning.interval.ms";
-  public static final long
-      HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_DEFAULT = 10000L;
-
-  public static final String OZONE_CONTAINER_COPY_WORKDIR =
-      "hdds.datanode.replication.work.dir";
-
-  /**
-   * There is no need to instantiate this class.
-   */
-  private OzoneConfigKeys() {
-  }
-}
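
For context, a minimal sketch of how keys like these are typically read,
assuming a standard org.apache.hadoop.conf.Configuration; the wrapper class
and main method below are illustrative, not part of this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

public class ClientConfExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to the compiled-in default (5000 ms) when the key is unset.
    int socketTimeoutMs = conf.getInt(
        OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
        OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT);
    // Replication factor, defaulting to ReplicationFactor.THREE (3).
    int replication = conf.getInt(
        OzoneConfigKeys.OZONE_REPLICATION,
        OzoneConfigKeys.OZONE_REPLICATION_DEFAULT);
    System.out.println("socket timeout = " + socketTimeoutMs
        + " ms, replication = " + replication);
  }
}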

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
deleted file mode 100644
index 0a15ec8..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * Set of constants used in Ozone implementation.
- */
-@InterfaceAudience.Private
-public final class OzoneConsts {
-
-
-  public static final String STORAGE_DIR = "scm";
-  public static final String SCM_ID = "scmUuid";
-
-  public static final String OZONE_SIMPLE_ROOT_USER = "root";
-  public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
-
-  public static final String STORAGE_ID = "storageID";
-  public static final String DATANODE_UUID = "datanodeUuid";
-  public static final String CLUSTER_ID = "clusterID";
-  public static final String LAYOUTVERSION = "layOutVersion";
-  public static final String CTIME = "ctime";
-  /*
-   * Bucket name length limits; the same bounds also apply to volume names.
-   */
-  public static final int OZONE_MIN_BUCKET_NAME_LENGTH = 3;
-  public static final int OZONE_MAX_BUCKET_NAME_LENGTH = 63;
-
-  public static final String OZONE_ACL_USER_TYPE = "user";
-  public static final String OZONE_ACL_GROUP_TYPE = "group";
-  public static final String OZONE_ACL_WORLD_TYPE = "world";
-
-  public static final String OZONE_ACL_READ = "r";
-  public static final String OZONE_ACL_WRITE = "w";
-  public static final String OZONE_ACL_READ_WRITE = "rw";
-  public static final String OZONE_ACL_WRITE_READ = "wr";
-
-  public static final String OZONE_DATE_FORMAT =
-      "EEE, dd MMM yyyy HH:mm:ss zzz";
-  public static final String OZONE_TIME_ZONE = "GMT";
-
-  public static final String OZONE_COMPONENT = "component";
-  public static final String OZONE_FUNCTION  = "function";
-  public static final String OZONE_RESOURCE = "resource";
-  public static final String OZONE_USER = "user";
-  public static final String OZONE_REQUEST = "request";
-
-  public static final String OZONE_URI_SCHEME = "o3";
-  public static final String OZONE_HTTP_SCHEME = "http";
-  public static final String OZONE_URI_DELIMITER = "/";
-
-  public static final String CONTAINER_EXTENSION = ".container";
-  public static final String CONTAINER_META = ".meta";
-
-  // Refer to {@link ContainerReader} for container storage layout on disk.
-  public static final String CONTAINER_PREFIX  = "containers";
-  public static final String CONTAINER_META_PATH = "metadata";
-  public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
-  public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
-  public static final String CONTAINER_ROOT_PREFIX = "repository";
-
-  public static final String FILE_HASH = "SHA-256";
-  public static final String CHUNK_OVERWRITE = "OverWriteRequested";
-
-  public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB
-  public static final long KB = 1024L;
-  public static final long MB = KB * 1024L;
-  public static final long GB = MB * 1024L;
-  public static final long TB = GB * 1024L;
-
-  /**
-   * LevelDB names used by SCM and datanodes.
-   */
-  public static final String CONTAINER_DB_SUFFIX = "container.db";
-  public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
-  public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
-  public static final String SCM_PIPELINE_DB = "scm-" + PIPELINE_DB_SUFFIX;
-  public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX;
-  public static final String DELETED_BLOCK_DB = "deletedBlock.db";
-  public static final String OM_DB_NAME = "om.db";
-
-  public static final String STORAGE_DIR_CHUNKS = "chunks";
-
-  /**
-   * Supports Bucket Versioning.
-   */
-  public enum Versioning {
-    NOT_DEFINED, ENABLED, DISABLED;
-
-    public static Versioning getVersioning(boolean versioning) {
-      return versioning ? ENABLED : DISABLED;
-    }
-  }
-
-  public static final String DELETING_KEY_PREFIX = "#deleting#";
-  public static final String DELETED_KEY_PREFIX = "#deleted#";
-  public static final String DELETE_TRANSACTION_KEY_PREFIX = "#delTX#";
-
-  /**
-   * OM LevelDB prefixes.
-   *
-   * The OM DB stores metadata as KV pairs with certain prefixes;
-   * a prefix groups related metadata together, which makes
-   * retrieving it faster.
-   *
-   * OM DB Schema:
-   *  ----------------------------------------------------------
-   *  |  KEY                                     |     VALUE   |
-   *  ----------------------------------------------------------
-   *  | $userName                                |  VolumeList |
-   *  ----------------------------------------------------------
-   *  | /#volumeName                             |  VolumeInfo |
-   *  ----------------------------------------------------------
-   *  | /#volumeName/#bucketName                 |  BucketInfo |
-   *  ----------------------------------------------------------
-   *  | /volumeName/bucketName/keyName           |  KeyInfo    |
-   *  ----------------------------------------------------------
-   *  | #deleting#/volumeName/bucketName/keyName |  KeyInfo    |
-   *  ----------------------------------------------------------
-   */
-
-  public static final String OM_KEY_PREFIX = "/";
-  public static final String OM_USER_PREFIX = "$";
-
-  /**
-   * Max OM Quota size of 1024 PB.
-   */
-  public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;
-
-  /**
-   * Max number of keys returned per list buckets operation.
-   */
-  public static final int MAX_LISTBUCKETS_SIZE  = 1024;
-
-  /**
-   * Max number of keys returned per list keys operation.
-   */
-  public static final int MAX_LISTKEYS_SIZE  = 1024;
-
-  /**
-   * Max number of volumes returned per list volumes operation.
-   */
-  public static final int MAX_LISTVOLUMES_SIZE = 1024;
-
-  public static final int INVALID_PORT = -1;
-
-
-  // The ServiceListJSONServlet context attribute where OzoneManager
-  // instance gets stored.
-  public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om";
-
-  private OzoneConsts() {
-    // Never Constructed
-  }
-
-  // YAML fields for .container files
-  public static final String CONTAINER_ID = "containerID";
-  public static final String CONTAINER_TYPE = "containerType";
-  public static final String STATE = "state";
-  public static final String METADATA = "metadata";
-  public static final String MAX_SIZE = "maxSize";
-  public static final String METADATA_PATH = "metadataPath";
-  public static final String CHUNKS_PATH = "chunksPath";
-  public static final String CONTAINER_DB_TYPE = "containerDBType";
-  public static final String CHECKSUM = "checksum";
-
-  // For OM Audit usage
-  public static final String VOLUME = "volume";
-  public static final String BUCKET = "bucket";
-  public static final String KEY = "key";
-  public static final String QUOTA = "quota";
-  public static final String QUOTA_IN_BYTES = "quotaInBytes";
-  public static final String CLIENT_ID = "clientID";
-  public static final String OWNER = "owner";
-  public static final String ADMIN = "admin";
-  public static final String USERNAME = "username";
-  public static final String PREV_KEY = "prevKey";
-  public static final String START_KEY = "startKey";
-  public static final String MAX_KEYS = "maxKeys";
-  public static final String PREFIX = "prefix";
-  public static final String KEY_PREFIX = "keyPrefix";
-  public static final String ACLS = "acls";
-  public static final String USER_ACL = "userAcl";
-  public static final String ADD_ACLS = "addAcls";
-  public static final String REMOVE_ACLS = "removeAcls";
-  public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets";
-  public static final String TO_KEY_NAME = "toKeyName";
-  public static final String STORAGE_TYPE = "storageType";
-  public static final String IS_VERSION_ENABLED = "isVersionEnabled";
-  public static final String CREATION_TIME = "creationTime";
-  public static final String DATA_SIZE = "dataSize";
-  public static final String REPLICATION_TYPE = "replicationType";
-  public static final String REPLICATION_FACTOR = "replicationFactor";
-  public static final String KEY_LOCATION_INFO = "keyLocationInfo";
-
-
-
-}
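
The OM DB schema table above maps directly onto flat key strings. A short
sketch of that mapping, using only the prefix constants from this file; the
helper methods themselves are hypothetical illustrations:

import org.apache.hadoop.ozone.OzoneConsts;

public class OmKeyLayoutExample {
  // $userName -> VolumeList
  static String userKey(String userName) {
    return OzoneConsts.OM_USER_PREFIX + userName;
  }
  // /volumeName -> VolumeInfo
  static String volumeKey(String volume) {
    return OzoneConsts.OM_KEY_PREFIX + volume;
  }
  // /volumeName/bucketName -> BucketInfo
  static String bucketKey(String volume, String bucket) {
    return volumeKey(volume) + OzoneConsts.OM_KEY_PREFIX + bucket;
  }
  // #deleting#/volumeName/bucketName/keyName -> KeyInfo awaiting deletion
  static String deletingKey(String volume, String bucket, String key) {
    return OzoneConsts.DELETING_KEY_PREFIX + volumeKey(volume)
        + OzoneConsts.OM_KEY_PREFIX + bucket
        + OzoneConsts.OM_KEY_PREFIX + key;
  }
}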

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
deleted file mode 100644
index 8c1d6f0..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Interface to define AuditAction.
- */
-public interface AuditAction {
-  /**
-   * Implementation must override.
-   * @return String
-   */
-  String getAction();
-}
-
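
A typical implementor of AuditAction is an enum of operation names; the
OMAction enum below is a hypothetical illustration, not part of this change:

import org.apache.hadoop.ozone.audit.AuditAction;

public enum OMAction implements AuditAction {
  CREATE_VOLUME,
  CREATE_BUCKET,
  ALLOCATE_KEY;

  @Override
  public String getAction() {
    return name();
  }
}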

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
deleted file mode 100644
index 098ab6b..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enum to define AuditEventStatus values.
- */
-public enum AuditEventStatus {
-  SUCCESS("SUCCESS"),
-  FAILURE("FAILURE");
-
-  private String status;
-
-  AuditEventStatus(String status){
-    this.status = status;
-  }
-
-  public String getStatus() {
-    return status;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
deleted file mode 100644
index 9357774..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.logging.log4j.Level;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.spi.ExtendedLogger;
-
-
-/**
- * Class to define Audit Logger for Ozone.
- */
-public class AuditLogger {
-
-  private ExtendedLogger logger;
-  private static final String FQCN = AuditLogger.class.getName();
-  private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
-  private static final Marker READ_MARKER = AuditMarker.READ.getMarker();
-
-  /**
-   * Parameterized constructor to initialize the logger.
-   * @param type Audit Logger Type
-   */
-  public AuditLogger(AuditLoggerType type){
-    initializeLogger(type);
-  }
-
-  /**
-   * Initializes the logger with specific type.
-   * @param loggerType one of the values from enum AuditLoggerType.
-   */
-  private void initializeLogger(AuditLoggerType loggerType){
-    this.logger = LogManager.getContext(false).getLogger(loggerType.getType());
-  }
-
-  @VisibleForTesting
-  public ExtendedLogger getLogger() {
-    return logger;
-  }
-
-  public void logWriteSuccess(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.INFO, WRITE_MARKER, msg, null);
-  }
-
-  public void logWriteFailure(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.ERROR, WRITE_MARKER, msg,
-        msg.getThrowable());
-  }
-
-  public void logReadSuccess(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.INFO, READ_MARKER, msg, null);
-  }
-
-  public void logReadFailure(AuditMessage msg) {
-    this.logger.logIfEnabled(FQCN, Level.ERROR, READ_MARKER, msg,
-        msg.getThrowable());
-  }
-
-}
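
A minimal usage sketch tying the audit classes in this commit together:
build an AuditMessage through its Builder and hand it to an AuditLogger.
The user, ip, and operation values below are illustrative:

import java.util.Collections;
import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.audit.AuditLoggerType;
import org.apache.hadoop.ozone.audit.AuditMessage;

public class AuditLoggingExample {
  private static final AuditLogger AUDIT =
      new AuditLogger(AuditLoggerType.OMLOGGER);

  public static void main(String[] args) {
    AuditMessage msg = new AuditMessage.Builder()
        .setUser("hdfs")
        .atIp("127.0.0.1")
        .forOperation("CREATE_VOLUME")
        .withParams(Collections.singletonMap("volume", "vol1"))
        .withResult("SUCCESS")
        .build();
    AUDIT.logWriteSuccess(msg);  // logged at INFO with the WRITE marker
  }
}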

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
deleted file mode 100644
index 18241c7..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-/**
- * Enumeration for defining types of Audit Loggers in Ozone.
- */
-public enum AuditLoggerType {
-  DNLOGGER("DNAudit"),
-  OMLOGGER("OMAudit"),
-  SCMLOGGER("SCMAudit");
-
-  private String type;
-
-  public String getType() {
-    return type;
-  }
-
-  AuditLoggerType(String type){
-    this.type = type;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
deleted file mode 100644
index 505b958..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-import org.apache.logging.log4j.Marker;
-import org.apache.logging.log4j.MarkerManager;
-
-/**
- * Defines audit marker types.
- */
-public enum AuditMarker {
-  WRITE(MarkerManager.getMarker("WRITE")),
-  READ(MarkerManager.getMarker("READ"));
-
-  private Marker marker;
-
-  AuditMarker(Marker marker){
-    this.marker = marker;
-  }
-
-  public Marker getMarker(){
-    return marker;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
deleted file mode 100644
index 1569ffe..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.audit;
-
-import org.apache.logging.log4j.message.Message;
-
-import java.util.Map;
-
-/**
- * Defines audit message structure.
- */
-public class AuditMessage implements Message {
-
-  private String message;
-  private Throwable throwable;
-
-  private static final String MSG_PATTERN =
-      "user=%s | ip=%s | op=%s %s | ret=%s";
-
-  public AuditMessage(){
-
-  }
-
-  @Override
-  public String getFormattedMessage() {
-    return message;
-  }
-
-  @Override
-  public String getFormat() {
-    return null;
-  }
-
-  @Override
-  public Object[] getParameters() {
-    return new Object[0];
-  }
-
-  @Override
-  public Throwable getThrowable() {
-    return throwable;
-  }
-
-  /**
-   * Use when a custom string has to be appended to the default message.
-   * @param customMessage custom string
-   */
-  private void appendMessage(String customMessage) {
-    this.message += customMessage;
-  }
-
-  public String getMessage() {
-    return message;
-  }
-
-  public void setMessage(String message) {
-    this.message = message;
-  }
-
-  public void setThrowable(Throwable throwable) {
-    this.throwable = throwable;
-  }
-
-  /**
-   * Builder class for AuditMessage.
-   */
-  public static class Builder {
-    private Throwable throwable;
-    private String user;
-    private String ip;
-    private String op;
-    private Map<String, String> params;
-    private String ret;
-
-    public Builder(){
-
-    }
-
-    public Builder setUser(String usr){
-      this.user = usr;
-      return this;
-    }
-
-    public Builder atIp(String ipAddr){
-      this.ip = ipAddr;
-      return this;
-    }
-
-    public Builder forOperation(String operation){
-      this.op = operation;
-      return this;
-    }
-
-    public Builder withParams(Map<String, String> args){
-      this.params = args;
-      return this;
-    }
-
-    public Builder withResult(String result){
-      this.ret = result;
-      return this;
-    }
-
-    public Builder withException(Throwable ex){
-      this.throwable = ex;
-      return this;
-    }
-
-    public AuditMessage build(){
-      AuditMessage auditMessage = new AuditMessage();
-      auditMessage.message = String.format(MSG_PATTERN,
-          this.user, this.ip, this.op, this.params, this.ret);
-      auditMessage.throwable = this.throwable;
-      return auditMessage;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
deleted file mode 100644
index d388bca..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import java.util.Map;
-
-/**
- * Interface to make an entity auditable.
- */
-public interface Auditable {
-  /**
-   * Must override in implementation.
-   * @return Map<String, String> with values to be logged in audit.
-   */
-  Map<String, String> toAuditMap();
-}
-
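
An entity becomes auditable by exposing its loggable fields as a map; a
hedged sketch, where the class name and its fields are illustrative only:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.ozone.audit.Auditable;

public class VolumeArgsExample implements Auditable {
  private final String volume = "vol1";
  private final String owner = "hdfs";

  @Override
  public Map<String, String> toAuditMap() {
    Map<String, String> auditMap = new LinkedHashMap<>();
    auditMap.put("volume", volume);
    auditMap.put("owner", owner);
    return auditMap;
  }
}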

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java
deleted file mode 100644
index 51c0298..0000000
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditor.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.audit;
-
-import java.util.Map;
-
-/**
- * Interface to mark an actor as Auditor.
- */
-public interface Auditor {
-
-  AuditMessage buildAuditMessageForSuccess(
-      AuditAction op, Map<String, String> auditMap);
-
-  AuditMessage buildAuditMessageForFailure(
-      AuditAction op, Map<String, String> auditMap, Throwable throwable);
-
-}
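
An Auditor implementation typically delegates to AuditMessage.Builder and
stamps the result with AuditEventStatus; a sketch under that assumption,
with the class name and the user/ip values purely illustrative:

import java.util.Map;
import org.apache.hadoop.ozone.audit.AuditAction;
import org.apache.hadoop.ozone.audit.AuditEventStatus;
import org.apache.hadoop.ozone.audit.AuditMessage;
import org.apache.hadoop.ozone.audit.Auditor;

public class ExampleAuditor implements Auditor {
  @Override
  public AuditMessage buildAuditMessageForSuccess(
      AuditAction op, Map<String, String> auditMap) {
    return new AuditMessage.Builder()
        .setUser("hdfs")                 // illustrative
        .atIp("127.0.0.1")               // illustrative
        .forOperation(op.getAction())
        .withParams(auditMap)
        .withResult(AuditEventStatus.SUCCESS.getStatus())
        .build();
  }

  @Override
  public AuditMessage buildAuditMessageForFailure(
      AuditAction op, Map<String, String> auditMap, Throwable throwable) {
    return new AuditMessage.Builder()
        .setUser("hdfs")                 // illustrative
        .atIp("127.0.0.1")               // illustrative
        .forOperation(op.getAction())
        .withParams(auditMap)
        .withResult(AuditEventStatus.FAILURE.getStatus())
        .withException(throwable)
        .build();
  }
}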

