http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
new file mode 100644
index 0000000..f234ad3
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerLocationProtocolProtos
+    .StorageContainerLocationProtocolService;
+import org.apache.hadoop.ipc.ProtocolInfo;
+
+/**
+ * Protocol used from an HDFS node to StorageContainerManager.  This extends the
+ * Protocol Buffers service interface to add Hadoop-specific annotations.
+ */
+@ProtocolInfo(protocolName =
+    "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.Private
+public interface StorageContainerLocationProtocolPB
+    extends StorageContainerLocationProtocolService.BlockingInterface {
+}
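
The interface above only carries the Hadoop annotations; a client still has to
bind it to the RPC layer. The following is a minimal, hypothetical sketch (not
part of this patch) of how a protobuf-annotated protocol interface like this is
typically wired up with ProtobufRpcEngine; the SCM host and port are
placeholders.

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.UserGroupInformation;

    public final class ScmLocationProxyExample {
      public static StorageContainerLocationProtocolPB createProxy(Configuration conf)
          throws IOException {
        // Placeholder address; a real client would read it from configuration.
        InetSocketAddress scmAddress =
            NetUtils.createSocketAddr("scm.example.com:9860");
        // Tell the RPC layer to use the protobuf engine for this protocol.
        RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
            ProtobufRpcEngine.class);
        return RPC.getProxy(StorageContainerLocationProtocolPB.class,
            RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class),
            scmAddress, UserGroupInformation.getCurrentUser(), conf,
            NetUtils.getDefaultSocketFactory(conf));
      }
    }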

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
new file mode 100644
index 0000000..652ae60
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.protocolPB;
+
+/**
+ * This package contains classes for the client of the storage container
+ * protocol.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
new file mode 100644
index 0000000..1559816
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -0,0 +1,396 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+    .StorageContainerException;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .GetKeyResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .GetSmallFileRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .GetSmallFileResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .PutSmallFileRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadChunkResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadContainerRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .ReadContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
+    .WriteChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+
+import java.io.IOException;
+
+/**
+ * Implementation of all container protocol calls performed by Container
+ * clients.
+ */
+public final class ContainerProtocolCalls  {
+
+  /**
+   * There is no need to instantiate this class.
+   */
+  private ContainerProtocolCalls() {
+  }
+
+  /**
+   * Calls the container protocol to get a container key.
+   *
+   * @param xceiverClient client to perform call
+   * @param containerKeyData key data to identify container
+   * @param traceID container protocol call args
+   * @return container protocol get key response
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static GetKeyResponseProto getKey(XceiverClientSpi xceiverClient,
+      KeyData containerKeyData, String traceID) throws IOException {
+    GetKeyRequestProto.Builder readKeyRequest = GetKeyRequestProto
+        .newBuilder()
+        .setPipeline(xceiverClient.getPipeline().getProtobufMessage())
+        .setKeyData(containerKeyData);
+    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.GetKey)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setGetKey(readKeyRequest)
+        .build();
+    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
+    validateContainerResponse(response);
+    return response.getGetKey();
+  }
+
+  /**
+   * Calls the container protocol to put a container key.
+   *
+   * @param xceiverClient client to perform call
+   * @param containerKeyData key data to identify container
+   * @param traceID container protocol call args
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static void putKey(XceiverClientSpi xceiverClient,
+      KeyData containerKeyData, String traceID) throws IOException {
+    PutKeyRequestProto.Builder createKeyRequest = PutKeyRequestProto
+        .newBuilder()
+        .setPipeline(xceiverClient.getPipeline().getProtobufMessage())
+        .setKeyData(containerKeyData);
+    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.PutKey)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setPutKey(createKeyRequest)
+        .build();
+    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
+    validateContainerResponse(response);
+  }
+
+  /**
+   * Calls the container protocol to read a chunk.
+   *
+   * @param xceiverClient client to perform call
+   * @param chunk information about chunk to read
+   * @param key the key name
+   * @param traceID container protocol call args
+   * @return container protocol read chunk response
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static ReadChunkResponseProto readChunk(XceiverClientSpi xceiverClient,
+      ChunkInfo chunk, String key, String traceID)
+      throws IOException {
+    ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto
+        .newBuilder()
+        .setPipeline(xceiverClient.getPipeline().getProtobufMessage())
+        .setKeyName(key)
+        .setChunkData(chunk);
+    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.ReadChunk)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setReadChunk(readChunkRequest)
+        .build();
+    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
+    validateContainerResponse(response);
+    return response.getReadChunk();
+  }
+
+  /**
+   * Calls the container protocol to write a chunk.
+   *
+   * @param xceiverClient client to perform call
+   * @param chunk information about chunk to write
+   * @param key the key name
+   * @param data the data of the chunk to write
+   * @param traceID container protocol call args
+   * @throws IOException if there is an I/O error while performing the call
+   */
+  public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk,
+      String key, ByteString data, String traceID)
+      throws IOException {
+    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
+        .newBuilder()
+        .setPipeline(xceiverClient.getPipeline().getProtobufMessage())
+        .setKeyName(key)
+        .setChunkData(chunk)
+        .setData(data);
+    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.WriteChunk)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setWriteChunk(writeChunkRequest)
+        .build();
+    ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
+    validateContainerResponse(response);
+  }
+
+  /**
+   * Writes a small file using a single RPC. This takes the container name,
+   * key name, and data, and sends all of that data to the container in a
+   * single RPC. This API is designed to be used for files which are smaller
+   * than 1 MB.
+   *
+   * @param client - client that communicates with the container.
+   * @param containerName - Name of the container
+   * @param key - Name of the Key
+   * @param data - Data to be written into the container.
+   * @param traceID - Trace ID for logging purpose.
+   * @throws IOException
+   */
+  public static void writeSmallFile(XceiverClientSpi client,
+      String containerName, String key, byte[] data, String traceID)
+      throws IOException {
+
+    KeyData containerKeyData =
+        KeyData.newBuilder().setContainerName(containerName).setName(key)
+            .build();
+    PutKeyRequestProto.Builder createKeyRequest =
+        PutKeyRequestProto.newBuilder()
+            .setPipeline(client.getPipeline().getProtobufMessage())
+            .setKeyData(containerKeyData);
+
+    KeyValue keyValue =
+        KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
+            .build();
+    ChunkInfo chunk =
+        ChunkInfo.newBuilder().setChunkName(key + "_chunk").setOffset(0)
+            .setLen(data.length).addMetadata(keyValue).build();
+
+    PutSmallFileRequestProto putSmallFileRequest =
+        PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)
+            .setKey(createKeyRequest).setData(ByteString.copyFrom(data))
+            .build();
+
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request =
+        ContainerCommandRequestProto.newBuilder()
+            .setCmdType(Type.PutSmallFile)
+            .setTraceID(traceID)
+            .setDatanodeUuid(id)
+            .setPutSmallFile(putSmallFileRequest)
+            .build();
+    ContainerCommandResponseProto response = client.sendCommand(request);
+    validateContainerResponse(response);
+  }
+
+  /**
+   * createContainer call that creates a container on the datanode.
+   * @param client  - client
+   * @param traceID - traceID
+   * @throws IOException
+   */
+  public static void createContainer(XceiverClientSpi client, String traceID)
+      throws IOException {
+    ContainerProtos.CreateContainerRequestProto.Builder createRequest =
+        ContainerProtos.CreateContainerRequestProto
+            .newBuilder();
+    ContainerProtos.ContainerData.Builder containerData = ContainerProtos
+        .ContainerData.newBuilder();
+    containerData.setName(client.getPipeline().getContainerName());
+    createRequest.setPipeline(client.getPipeline().getProtobufMessage());
+    createRequest.setContainerData(containerData.build());
+
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.CreateContainer);
+    request.setCreateContainer(createRequest);
+    request.setDatanodeUuid(id);
+    request.setTraceID(traceID);
+    ContainerCommandResponseProto response = client.sendCommand(
+        request.build());
+    validateContainerResponse(response);
+  }
+
+  /**
+   * Deletes a container from a pipeline.
+   *
+   * @param client
+   * @param force whether or not to forcibly delete the container.
+   * @param traceID
+   * @throws IOException
+   */
+  public static void deleteContainer(XceiverClientSpi client,
+      boolean force, String traceID) throws IOException {
+    ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest =
+        ContainerProtos.DeleteContainerRequestProto.newBuilder();
+    deleteRequest.setName(client.getPipeline().getContainerName());
+    deleteRequest.setPipeline(client.getPipeline().getProtobufMessage());
+    deleteRequest.setForceDelete(force);
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(ContainerProtos.Type.DeleteContainer);
+    request.setDeleteContainer(deleteRequest);
+    request.setTraceID(traceID);
+    request.setDatanodeUuid(id);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+  }
+
+  /**
+   * Close a container.
+   *
+   * @param client
+   * @param traceID
+   * @throws IOException
+   */
+  public static void closeContainer(XceiverClientSpi client, String traceID)
+      throws IOException {
+    ContainerProtos.CloseContainerRequestProto.Builder closeRequest =
+        ContainerProtos.CloseContainerRequestProto.newBuilder();
+    closeRequest.setPipeline(client.getPipeline().getProtobufMessage());
+
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(Type.CloseContainer);
+    request.setCloseContainer(closeRequest);
+    request.setTraceID(traceID);
+    request.setDatanodeUuid(id);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+  }
+
+  /**
+   * readContainer call that gets meta data from an existing container.
+   *
+   * @param client - client
+   * @param traceID - trace ID
+   * @throws IOException
+   */
+  public static ReadContainerResponseProto readContainer(
+      XceiverClientSpi client, String containerName,
+      String traceID) throws IOException {
+    ReadContainerRequestProto.Builder readRequest =
+        ReadContainerRequestProto.newBuilder();
+    readRequest.setName(containerName);
+    readRequest.setPipeline(client.getPipeline().getProtobufMessage());
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(Type.ReadContainer);
+    request.setReadContainer(readRequest);
+    request.setDatanodeUuid(id);
+    request.setTraceID(traceID);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+    return response.getReadContainer();
+  }
+
+  /**
+   * Reads the data given the container name and key.
+   *
+   * @param client
+   * @param containerName - name of the container
+   * @param key - key
+   * @param traceID - trace ID
+   * @return GetSmallFileResponseProto
+   * @throws IOException
+   */
+  public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client,
+      String containerName, String key, String traceID) throws IOException {
+    KeyData containerKeyData = KeyData
+        .newBuilder()
+        .setContainerName(containerName)
+        .setName(key).build();
+
+    GetKeyRequestProto.Builder getKey = GetKeyRequestProto
+        .newBuilder()
+        .setPipeline(client.getPipeline().getProtobufMessage())
+        .setKeyData(containerKeyData);
+    ContainerProtos.GetSmallFileRequestProto getSmallFileRequest =
+        GetSmallFileRequestProto
+            .newBuilder().setKey(getKey)
+            .build();
+    String id = client.getPipeline().getLeader().getUuidString();
+    ContainerCommandRequestProto request = ContainerCommandRequestProto
+        .newBuilder()
+        .setCmdType(Type.GetSmallFile)
+        .setTraceID(traceID)
+        .setDatanodeUuid(id)
+        .setGetSmallFile(getSmallFileRequest)
+        .build();
+    ContainerCommandResponseProto response = client.sendCommand(request);
+    validateContainerResponse(response);
+    return response.getGetSmallFile();
+  }
+
+  /**
+   * Validates a response from a container protocol call.  Any non-successful
+   * return code is mapped to a corresponding exception and thrown.
+   *
+   * @param response container protocol call response
+   * @throws IOException if the container protocol call failed
+   */
+  private static void validateContainerResponse(
+      ContainerCommandResponseProto response
+  ) throws StorageContainerException {
+    if (response.getResult() == ContainerProtos.Result.SUCCESS) {
+      return;
+    }
+    throw new StorageContainerException(
+        response.getMessage(), response.getResult());
+  }
+}
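
All of the helpers above are static and stateless; callers supply a connected
XceiverClientSpi (obtained elsewhere, e.g. from an XceiverClientManager) plus a
trace ID. A minimal, hypothetical usage sketch (not part of this patch) for the
small-file path, assuming the container name matches the client's pipeline:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetSmallFileResponseProto;
    import org.apache.hadoop.hdds.scm.XceiverClientSpi;
    import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

    public final class SmallFileRoundTrip {
      public static GetSmallFileResponseProto roundTrip(XceiverClientSpi client,
          String containerName) throws IOException {
        String traceID = "trace-1";  // hypothetical trace id for log correlation
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        // Create the container, write the key in one RPC, then read it back.
        ContainerProtocolCalls.createContainer(client, traceID);
        ContainerProtocolCalls.writeSmallFile(client, containerName, "key1",
            payload, traceID);
        return ContainerProtocolCalls.readSmallFile(client, containerName,
            "key1", traceID);
      }
    }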

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
new file mode 100644
index 0000000..8e98158
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+/**
+ * This package contains StorageContainerManager classes.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
new file mode 100644
index 0000000..ff0ac4e
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone;
+
+import java.util.Objects;
+
+/**
+ * OzoneACL classes define bucket ACLs used in OZONE.
+ *
+ * ACLs in Ozone follow this pattern.
+ * • user:name:rw
+ * • group:name:rw
+ * • world::rw
+ */
+public class OzoneAcl {
+  private OzoneACLType type;
+  private String name;
+  private OzoneACLRights rights;
+
+  /**
+   * Constructor for OzoneAcl.
+   */
+  public OzoneAcl() {
+  }
+
+  /**
+   * Constructor for OzoneAcl.
+   *
+   * @param type - Type
+   * @param name - Name of user
+   * @param rights - Rights
+   */
+  public OzoneAcl(OzoneACLType type, String name, OzoneACLRights rights) {
+    this.name = name;
+    this.rights = rights;
+    this.type = type;
+    if (type == OzoneACLType.WORLD && name.length() != 0) {
+      throw new IllegalArgumentException("Unexpected name part in world type");
+    }
+    if (((type == OzoneACLType.USER) || (type == OzoneACLType.GROUP))
+        && (name.length() == 0)) {
+      throw new IllegalArgumentException("User or group name is required");
+    }
+  }
+
+  /**
+   * Parses an ACL string and returns the ACL object.
+   *
+   * @param acl - Acl String , Ex. user:anu:rw
+   *
+   * @return - Ozone ACLs
+   */
+  public static OzoneAcl parseAcl(String acl) throws IllegalArgumentException {
+    if ((acl == null) || acl.isEmpty()) {
+      throw new IllegalArgumentException("ACLs cannot be null or empty");
+    }
+    String[] parts = acl.trim().split(":");
+    if (parts.length < 3) {
+      throw new IllegalArgumentException("ACLs are not in expected format");
+    }
+
+    OzoneACLType aclType = OzoneACLType.valueOf(parts[0].toUpperCase());
+    OzoneACLRights rights = OzoneACLRights.getACLRight(parts[2].toLowerCase());
+
+    // TODO : Support sanitation of these user names by calling into
+    // userAuth Interface.
+    return new OzoneAcl(aclType, parts[1], rights);
+  }
+
+  @Override
+  public String toString() {
+    return type + ":" + name + ":" + OzoneACLRights.getACLRightsString(rights);
+  }
+
+  /**
+   * Returns a hash code value for the object. This method is
+   * supported for the benefit of hash tables.
+   *
+   * @return a hash code value for this object.
+   *
+   * @see Object#equals(Object)
+   * @see System#identityHashCode
+   */
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.getName(), this.getRights().toString(),
+                        this.getType().toString());
+  }
+
+  /**
+   * Returns name.
+   *
+   * @return name
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * Returns Rights.
+   *
+   * @return - Rights
+   */
+  public OzoneACLRights getRights() {
+    return rights;
+  }
+
+  /**
+   * Returns Type.
+   *
+   * @return type
+   */
+  public OzoneACLType getType() {
+    return type;
+  }
+
+  /**
+   * Indicates whether some other object is "equal to" this one.
+   *
+   * @param obj the reference object with which to compare.
+   *
+   * @return {@code true} if this object is the same as the obj
+   * argument; {@code false} otherwise.
+   */
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    OzoneAcl otherAcl = (OzoneAcl) obj;
+    return otherAcl.getName().equals(this.getName()) &&
+        otherAcl.getRights() == this.getRights() &&
+        otherAcl.getType() == this.getType();
+  }
+
+  /**
+   * ACL types.
+   */
+  public enum OzoneACLType {
+    USER(OzoneConsts.OZONE_ACL_USER_TYPE),
+    GROUP(OzoneConsts.OZONE_ACL_GROUP_TYPE),
+    WORLD(OzoneConsts.OZONE_ACL_WORLD_TYPE);
+
+    /**
+     * String value for this Enum.
+     */
+    private final String value;
+
+    /**
+     * Init OzoneACLtypes enum.
+     *
+     * @param val String type for this enum.
+     */
+    OzoneACLType(String val) {
+      value = val;
+    }
+  }
+
+  /**
+   * ACL rights.
+   */
+  public enum OzoneACLRights {
+    READ, WRITE, READ_WRITE;
+
+    /**
+     * Returns the ACL rights based on passed in String.
+     *
+     * @param type ACL right string
+     *
+     * @return OzoneACLRights
+     */
+    public static OzoneACLRights getACLRight(String type) {
+      if (type == null || type.isEmpty()) {
+        throw new IllegalArgumentException("ACL right cannot be empty");
+      }
+
+      switch (type) {
+      case OzoneConsts.OZONE_ACL_READ:
+        return OzoneACLRights.READ;
+      case OzoneConsts.OZONE_ACL_WRITE:
+        return OzoneACLRights.WRITE;
+      case OzoneConsts.OZONE_ACL_READ_WRITE:
+      case OzoneConsts.OZONE_ACL_WRITE_READ:
+        return OzoneACLRights.READ_WRITE;
+      default:
+        throw new IllegalArgumentException("ACL right is not recognized");
+      }
+
+    }
+
+    /**
+     * Returns String representation of ACL rights.
+     * @param acl OzoneACLRights
+     * @return String representation of acl
+     */
+    public static String getACLRightsString(OzoneACLRights acl) {
+      switch(acl) {
+      case READ:
+        return OzoneConsts.OZONE_ACL_READ;
+      case WRITE:
+        return OzoneConsts.OZONE_ACL_WRITE;
+      case READ_WRITE:
+        return OzoneConsts.OZONE_ACL_READ_WRITE;
+      default:
+        throw new IllegalArgumentException("ACL right is not recognized");
+      }
+    }
+
+  }
+
+}
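
A short sketch of how the parser and accessors above fit together, using the
"user:anu:rw" example from the parseAcl Javadoc:

    import org.apache.hadoop.ozone.OzoneAcl;

    public final class OzoneAclExample {
      public static void main(String[] args) {
        OzoneAcl acl = OzoneAcl.parseAcl("user:anu:rw");
        // Type and rights are resolved case-insensitively from the string parts.
        assert acl.getType() == OzoneAcl.OzoneACLType.USER;
        assert acl.getRights() == OzoneAcl.OzoneACLRights.READ_WRITE;
        // toString() round-trips to the "TYPE:name:rights" form.
        System.out.println(acl);   // prints USER:anu:rw
      }
    }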

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
new file mode 100644
index 0000000..ef96f379
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -0,0 +1,238 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+
+/**
+ * This class contains constants for configuration keys used in Ozone.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public final class OzoneConfigKeys {
+  public static final String DFS_CONTAINER_IPC_PORT =
+      "dfs.container.ipc";
+  public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859;
+
+  /**
+   *
+   * When set to true, allocate a random free port for ozone container,
+   * so that a mini cluster is able to launch multiple containers on a node.
+   *
+   * When set to false (default), container port is fixed as specified by
+   * DFS_CONTAINER_IPC_PORT_DEFAULT.
+   */
+  public static final String DFS_CONTAINER_IPC_RANDOM_PORT =
+      "dfs.container.ipc.random.port";
+  public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
+      false;
+
+  /**
+   * Ratis Port where containers listen to.
+   */
+  public static final String DFS_CONTAINER_RATIS_IPC_PORT =
+      "dfs.container.ratis.ipc";
+  public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858;
+
+  /**
+   * When set to true, allocate a random free port for ozone container, so that
+   * a mini cluster is able to launch multiple containers on a node.
+   */
+  public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT =
+      "dfs.container.ratis.ipc.random.port";
+  public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT =
+      false;
+
+  public static final String OZONE_LOCALSTORAGE_ROOT =
+      "ozone.localstorage.root";
+  public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
+  public static final String OZONE_ENABLED =
+      "ozone.enabled";
+  public static final boolean OZONE_ENABLED_DEFAULT = false;
+  public static final String OZONE_HANDLER_TYPE_KEY =
+      "ozone.handler.type";
+  public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
+  public static final String OZONE_TRACE_ENABLED_KEY =
+      "ozone.trace.enabled";
+  public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
+
+  public static final String OZONE_METADATA_DIRS =
+      "ozone.metadata.dirs";
+
+  public static final String OZONE_METADATA_STORE_IMPL =
+      "ozone.metastore.impl";
+  public static final String OZONE_METADATA_STORE_IMPL_LEVELDB =
+      "LevelDB";
+  public static final String OZONE_METADATA_STORE_IMPL_ROCKSDB =
+      "RocksDB";
+  public static final String OZONE_METADATA_STORE_IMPL_DEFAULT =
+      OZONE_METADATA_STORE_IMPL_ROCKSDB;
+
+  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS =
+      "ozone.metastore.rocksdb.statistics";
+
+  public static final String  OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT =
+      "ALL";
+  public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF =
+      "OFF";
+
+  public static final String OZONE_CONTAINER_CACHE_SIZE =
+      "ozone.container.cache.size";
+  public static final int OZONE_CONTAINER_CACHE_DEFAULT = 1024;
+
+  public static final String OZONE_SCM_BLOCK_SIZE_IN_MB =
+      "ozone.scm.block.size.in.mb";
+  public static final long OZONE_SCM_BLOCK_SIZE_DEFAULT = 256;
+
+  /**
+   * Ozone administrator users delimited by comma.
+   * If not set, only the user who launches an ozone service will be the
+   * admin user. This property must be set if ozone services are started by
+   * different users. Otherwise the RPC layer will reject calls from
+   * other servers which are started by users not in the list.
+   * */
+  public static final String OZONE_ADMINISTRATORS =
+      "ozone.administrators";
+
+  public static final String OZONE_CLIENT_PROTOCOL =
+      "ozone.client.protocol";
+
+  // This defines the overall connection limit for the connection pool used in
+  // RestClient.
+  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_MAX =
+      "ozone.rest.client.http.connection.max";
+  public static final int OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT = 100;
+
+  // This defines the connection limit per one HTTP route/host.
+  public static final String OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX =
+      "ozone.rest.client.http.connection.per-route.max";
+
+  public static final int
+      OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT = 20;
+
+  public static final String OZONE_CLIENT_SOCKET_TIMEOUT =
+      "ozone.client.socket.timeout";
+  public static final int OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT = 5000;
+  public static final String OZONE_CLIENT_CONNECTION_TIMEOUT =
+      "ozone.client.connection.timeout";
+  public static final int OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT = 5000;
+
+  public static final String OZONE_REPLICATION = "ozone.replication";
+  public static final int OZONE_REPLICATION_DEFAULT =
+      ReplicationFactor.THREE.getValue();
+
+  public static final String OZONE_REPLICATION_TYPE = "ozone.replication.type";
+  public static final String OZONE_REPLICATION_TYPE_DEFAULT =
+      ReplicationType.RATIS.toString();
+
+  /**
+   * Configuration property to configure the cache size of client list calls.
+   */
+  public static final String OZONE_CLIENT_LIST_CACHE_SIZE =
+      "ozone.client.list.cache";
+  public static final int OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT = 1000;
+
+  /**
+   * Configuration properties for Ozone Block Deleting Service.
+   */
+  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL =
+      "ozone.block.deleting.service.interval";
+  public static final String OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT
+      = "60s";
+
+  /**
+   * The interval of open key clean service.
+   */
+  public static final String OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS =
+      "ozone.open.key.cleanup.service.interval.seconds";
+  public static final int
+      OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT
+      = 24 * 3600; // a total of 24 hours
+
+  /**
+   * An open key gets cleaned up when it is being in open state for too long.
+   */
+  public static final String OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS =
+      "ozone.open.key.expire.threshold";
+  public static final int OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT =
+      24 * 3600;
+
+  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT =
+      "ozone.block.deleting.service.timeout";
+  public static final String OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT
+      = "300s"; // 300s for default
+
+  public static final String OZONE_KEY_PREALLOCATION_MAXSIZE =
+      "ozone.key.preallocation.maxsize";
+  public static final long OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT
+      = 128 * OzoneConsts.MB;
+
+  public static final String OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER =
+      "ozone.block.deleting.limit.per.task";
+  public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT
+      = 1000;
+
+  public static final String OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL
+      = "ozone.block.deleting.container.limit.per.interval";
+  public static final int
+      OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10;
+
+  public static final String OZONE_CONTAINER_REPORT_INTERVAL =
+      "ozone.container.report.interval";
+  public static final String OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT =
+      "60s";
+
+  public static final String DFS_CONTAINER_RATIS_ENABLED_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
+  public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY;
+  public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY;
+  public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY;
+  public static final int DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT;
+  public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
+  public static final int DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
+      = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
+  public static final int DFS_CONTAINER_CHUNK_MAX_SIZE
+      = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
+  public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
+      "dfs.container.ratis.datanode.storage.dir";
+
+  public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+      "ozone.web.authentication.kerberos.principal";
+
+  /**
+   * There is no need to instantiate this class.
+   */
+  private OzoneConfigKeys() {
+  }
+}
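
These keys are ordinary Hadoop configuration names, so they are read and
written through a Configuration object. A small, hypothetical sketch (the
paths and values are placeholders, not recommendations):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    public final class OzoneConfigExample {
      public static Configuration buildConf() {
        Configuration conf = new Configuration();
        conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
        conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, "/var/lib/ozone/meta");
        conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
            OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
        // Falls back to the Ratis default defined above when the key is unset.
        System.out.println("ratis enabled: " + conf.getBoolean(
            OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
            OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT));
        return conf;
      }
    }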

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
new file mode 100644
index 0000000..2f9e469
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Set of constants used in Ozone implementation.
+ */
+@InterfaceAudience.Private
+public final class OzoneConsts {
+
+
+  public static final String STORAGE_DIR = "scm";
+  public static final String SCM_ID = "scmUuid";
+
+  public static final String OZONE_SIMPLE_ROOT_USER = "root";
+  public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
+
+  /*
+   * BucketName length is used for both buckets and volume lengths
+   */
+  public static final int OZONE_MIN_BUCKET_NAME_LENGTH = 3;
+  public static final int OZONE_MAX_BUCKET_NAME_LENGTH = 63;
+
+  public static final String OZONE_ACL_USER_TYPE = "user";
+  public static final String OZONE_ACL_GROUP_TYPE = "group";
+  public static final String OZONE_ACL_WORLD_TYPE = "world";
+
+  public static final String OZONE_ACL_READ = "r";
+  public static final String OZONE_ACL_WRITE = "w";
+  public static final String OZONE_ACL_READ_WRITE = "rw";
+  public static final String OZONE_ACL_WRITE_READ = "wr";
+
+  public static final String OZONE_DATE_FORMAT =
+      "EEE, dd MMM yyyy HH:mm:ss zzz";
+  public static final String OZONE_TIME_ZONE = "GMT";
+
+  public static final String OZONE_COMPONENT = "component";
+  public static final String OZONE_FUNCTION  = "function";
+  public static final String OZONE_RESOURCE = "resource";
+  public static final String OZONE_USER = "user";
+  public static final String OZONE_REQUEST = "request";
+
+  public static final String CONTAINER_EXTENSION = ".container";
+  public static final String CONTAINER_META = ".meta";
+
+  //  container storage is in the following format.
+  //  Data Volume basePath/containers/<containerName>/metadata and
+  //  Data Volume basePath/containers/<containerName>/data/...
+  public static final String CONTAINER_PREFIX  = "containers";
+  public static final String CONTAINER_META_PATH = "metadata";
+  public static final String CONTAINER_DATA_PATH = "data";
+  public static final String CONTAINER_TEMPORARY_CHUNK_PREFIX = "tmp";
+  public static final String CONTAINER_CHUNK_NAME_DELIMITER = ".";
+  public static final String CONTAINER_ROOT_PREFIX = "repository";
+
+  public static final String FILE_HASH = "SHA-256";
+  public final static String CHUNK_OVERWRITE = "OverWriteRequested";
+
+  public static final int CHUNK_SIZE = 1 * 1024 * 1024; // 1 MB
+  public static final long KB = 1024L;
+  public static final long MB = KB * 1024L;
+  public static final long GB = MB * 1024L;
+  public static final long TB = GB * 1024L;
+
+  /**
+   * level DB names used by SCM and data nodes.
+   */
+  public static final String CONTAINER_DB_SUFFIX = "container.db";
+  public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
+  public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
+  public static final String BLOCK_DB = "block.db";
+  public static final String NODEPOOL_DB = "nodepool.db";
+  public static final String OPEN_CONTAINERS_DB = "openContainers.db";
+  public static final String DELETED_BLOCK_DB = "deletedBlock.db";
+  public static final String KSM_DB_NAME = "ksm.db";
+
+  /**
+   * Supports Bucket Versioning.
+   */
+  public enum Versioning {NOT_DEFINED, ENABLED, DISABLED}
+
+  /**
+   * Ozone handler types.
+   */
+  public static final String OZONE_HANDLER_DISTRIBUTED = "distributed";
+  public static final String OZONE_HANDLER_LOCAL = "local";
+
+  public static final String DELETING_KEY_PREFIX = "#deleting#";
+  public static final String OPEN_KEY_PREFIX = "#open#";
+  public static final String OPEN_KEY_ID_DELIMINATOR = "#";
+
+  /**
+   * KSM LevelDB prefixes.
+   *
+   * KSM DB stores metadata as KV pairs with certain prefixes,
+   * prefix is used to improve the performance to get related
+   * metadata.
+   *
+   * KSM DB Schema:
+   *  ----------------------------------------------------------
+   *  |  KEY                                     |     VALUE   |
+   *  ----------------------------------------------------------
+   *  | $userName                                |  VolumeList |
+   *  ----------------------------------------------------------
+   *  | /#volumeName                             |  VolumeInfo |
+   *  ----------------------------------------------------------
+   *  | /#volumeName/#bucketName                 |  BucketInfo |
+   *  ----------------------------------------------------------
+   *  | /volumeName/bucketName/keyName           |  KeyInfo    |
+   *  ----------------------------------------------------------
+   *  | #deleting#/volumeName/bucketName/keyName |  KeyInfo    |
+   *  ----------------------------------------------------------
+   */
+  public static final String KSM_VOLUME_PREFIX = "/#";
+  public static final String KSM_BUCKET_PREFIX = "/#";
+  public static final String KSM_KEY_PREFIX = "/";
+  public static final String KSM_USER_PREFIX = "$";
+
+  /**
+   * Max KSM Quota size of 1024 PB.
+   */
+  public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;
+
+  /**
+   * Max number of keys returned per list buckets operation.
+   */
+  public static final int MAX_LISTBUCKETS_SIZE  = 1024;
+
+  /**
+   * Max number of keys returned per list keys operation.
+   */
+  public static final int MAX_LISTKEYS_SIZE  = 1024;
+
+  /**
+   * Max number of volumes returned per list volumes operation.
+   */
+  public static final int MAX_LISTVOLUMES_SIZE = 1024;
+
+  public static final int INVALID_PORT = -1;
+
+
+  // The ServiceListJSONServlet context attribute where KeySpaceManager
+  // instance gets stored.
+  public static final String KSM_CONTEXT_ATTRIBUTE = "ozone.ksm";
+
+  private OzoneConsts() {
+    // Never Constructed
+  }
+}
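
The prefixes and size units above are meant to be combined by callers, for
example to build KSM database keys that follow the schema documented in the
class Javadoc, or to express a quota in bytes. A brief illustrative sketch with
placeholder volume, bucket, and key names:

    import org.apache.hadoop.ozone.OzoneConsts;

    public final class OzoneConstsExample {
      public static void main(String[] args) {
        String volumeKey = OzoneConsts.KSM_VOLUME_PREFIX + "vol1";        // "/#vol1"
        String bucketKey = volumeKey
            + OzoneConsts.KSM_BUCKET_PREFIX + "bucket1";                  // "/#vol1/#bucket1"
        String keyEntry = OzoneConsts.KSM_KEY_PREFIX + "vol1/bucket1/key1";
        String deletingEntry = OzoneConsts.DELETING_KEY_PREFIX + keyEntry;
        long quotaInBytes = 10 * OzoneConsts.GB;                          // 10 GB quota
        System.out.println(bucketKey + " " + deletingEntry + " " + quotaInBytes);
      }
    }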

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
new file mode 100644
index 0000000..38ce6cc
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .KeyBlocks;
+
+import java.util.List;
+
+/**
+ * A group of related blocks, e.g. blocks that belong to a certain object key.
+ */
+public final class BlockGroup {
+
+  private String groupID;
+  private List<String> blockIDs;
+  private BlockGroup(String groupID, List<String> blockIDs) {
+    this.groupID = groupID;
+    this.blockIDs = blockIDs;
+  }
+
+  public List<String> getBlockIDList() {
+    return blockIDs;
+  }
+
+  public String getGroupID() {
+    return groupID;
+  }
+
+  public KeyBlocks getProto() {
+    return KeyBlocks.newBuilder().setKey(groupID)
+        .addAllBlocks(blockIDs).build();
+  }
+
+  /**
+   * Parses a KeyBlocks proto to a group of blocks.
+   * @param proto KeyBlocks proto.
+   * @return a group of blocks.
+   */
+  public static BlockGroup getFromProto(KeyBlocks proto) {
+    return BlockGroup.newBuilder().setKeyName(proto.getKey())
+        .addAllBlockIDs(proto.getBlocksList()).build();
+  }
+
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * BlockGroup instance builder.
+   */
+  public static class Builder {
+
+    private String groupID;
+    private List<String> blockIDs;
+
+    public Builder setKeyName(String blockGroupID) {
+      this.groupID = blockGroupID;
+      return this;
+    }
+
+    public Builder addAllBlockIDs(List<String> keyBlocks) {
+      this.blockIDs = keyBlocks;
+      return this;
+    }
+
+    public BlockGroup build() {
+      return new BlockGroup(groupID, blockIDs);
+    }
+  }
+
+}
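
A short sketch of building a BlockGroup with its builder and round-tripping it
through the KeyBlocks protobuf message; the key and block IDs are placeholders:

    import java.util.Arrays;
    import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks;
    import org.apache.hadoop.ozone.common.BlockGroup;

    public final class BlockGroupExample {
      public static void main(String[] args) {
        BlockGroup group = BlockGroup.newBuilder()
            .setKeyName("/vol1/bucket1/key1")
            .addAllBlockIDs(Arrays.asList("block-1", "block-2"))
            .build();
        // Convert to the wire form and back again.
        KeyBlocks proto = group.getProto();
        BlockGroup restored = BlockGroup.getFromProto(proto);
        System.out.println(restored.getGroupID() + " -> " + restored.getBlockIDList());
      }
    }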

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java
new file mode 100644
index 0000000..ec54ac5
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteScmBlockResult;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .DeleteScmBlockResult.Result;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Result to delete a group of blocks.
+ */
+public class DeleteBlockGroupResult {
+  private String objectKey;
+  private List<DeleteBlockResult> blockResultList;
+  public DeleteBlockGroupResult(String objectKey,
+      List<DeleteBlockResult> blockResultList) {
+    this.objectKey = objectKey;
+    this.blockResultList = blockResultList;
+  }
+
+  public String getObjectKey() {
+    return objectKey;
+  }
+
+  public List<DeleteBlockResult> getBlockResultList() {
+    return blockResultList;
+  }
+
+  public List<DeleteScmBlockResult> getBlockResultProtoList() {
+    List<DeleteScmBlockResult> resultProtoList =
+        new ArrayList<>(blockResultList.size());
+    for (DeleteBlockResult result : blockResultList) {
+      DeleteScmBlockResult proto = DeleteScmBlockResult.newBuilder()
+          .setKey(result.getKey())
+          .setResult(result.getResult()).build();
+      resultProtoList.add(proto);
+    }
+    return resultProtoList;
+  }
+
+  public static List<DeleteBlockResult> convertBlockResultProto(
+      List<DeleteScmBlockResult> results) {
+    List<DeleteBlockResult> protoResults = new ArrayList<>(results.size());
+    for (DeleteScmBlockResult result : results) {
+      protoResults.add(new DeleteBlockResult(result.getKey(),
+          result.getResult()));
+    }
+    return protoResults;
+  }
+
+  /**
+   * Only if all blocks are successfully deleted, this group is considered
+   * to be successfully executed.
+   *
+   * @return true if all blocks are successfully deleted, false otherwise.
+   */
+  public boolean isSuccess() {
+    for (DeleteBlockResult result : blockResultList) {
+      if (result.getResult() != Result.success) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * @return A list of deletion failed block IDs.
+   */
+  public List<String> getFailedBlocks() {
+    List<String> failedBlocks = blockResultList.stream()
+        .filter(result -> result.getResult() != Result.success)
+        .map(DeleteBlockResult::getKey).collect(Collectors.toList());
+    return failedBlocks;
+  }
+}
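
A brief sketch of how a caller inspects the per-block outcomes. The block keys
are placeholders, and the non-success value (errorNotFound) is assumed here to
exist in the DeleteScmBlockResult.Result enum:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
        .DeleteScmBlockResult.Result;
    import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
    import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;

    public final class DeleteResultExample {
      public static void main(String[] args) {
        List<DeleteBlockResult> results = Arrays.asList(
            new DeleteBlockResult("block-1", Result.success),
            new DeleteBlockResult("block-2", Result.errorNotFound)); // assumed value
        DeleteBlockGroupResult groupResult =
            new DeleteBlockGroupResult("/vol1/bucket1/key1", results);
        if (!groupResult.isSuccess()) {
          System.out.println("failed blocks: " + groupResult.getFailedBlocks());
        }
      }
    }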

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java
new file mode 100644
index 0000000..518b519
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java
@@ -0,0 +1,51 @@
+/**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * The exception is thrown when file system state is inconsistent
+ * and is not recoverable.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class InconsistentStorageStateException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public InconsistentStorageStateException(String descr) {
+    super(descr);
+  }
+
+  public InconsistentStorageStateException(File dir, String descr) {
+    super("Directory " + getFilePath(dir) + " is in an inconsistent state: "
+        + descr);
+  }
+
+  private static String getFilePath(File dir) {
+    try {
+      return dir.getCanonicalPath();
+    } catch (IOException e) {
+    }
+    return dir.getPath();
+  }
+}
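
Typical use is during startup checks of a storage directory; a minimal,
hypothetical sketch (the directory layout is a placeholder):

    import java.io.File;
    import org.apache.hadoop.ozone.common.InconsistentStorageStateException;

    public final class StorageCheckExample {
      static void checkCurrentDir(File root) throws InconsistentStorageStateException {
        File current = new File(root, "current");
        if (!current.isDirectory()) {
          // The (File, String) constructor prefixes the message with the
          // directory's canonical path.
          throw new InconsistentStorageStateException(current,
              "the 'current' directory is missing or not a directory");
        }
      }
    }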

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
new file mode 100644
index 0000000..fb30d92
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -0,0 +1,248 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Properties;
+
+/**
+ * Storage information file. This class defines the methods to check
+ * the consistency of the storage dir and the version file.
+ * <p>
+ * Local storage information is stored in a separate file VERSION.
+ * It contains the type of the node,
+ * the storage layout version, the SCM id, and
+ * the KSM/SCM state creation time.
+ *
+ */
+@InterfaceAudience.Private
+public abstract class Storage {
+  private static final Logger LOG = LoggerFactory.getLogger(Storage.class);
+
+  protected static final String STORAGE_DIR_CURRENT = "current";
+  protected static final String STORAGE_FILE_VERSION = "VERSION";
+
+  private final NodeType nodeType;
+  private final File root;
+  private final File storageDir;
+
+  private StorageState state;
+  private StorageInfo storageInfo;
+
+
+  /**
+   * Determines the state of the Version file.
+   */
+  public enum StorageState {
+    NON_EXISTENT, NOT_INITIALIZED, INITIALIZED
+  }
+
+  public Storage(NodeType type, File root, String sdName)
+      throws IOException {
+    this.nodeType = type;
+    this.root = root;
+    this.storageDir = new File(root, sdName);
+    this.state = getStorageState();
+    if (state == StorageState.INITIALIZED) {
+      this.storageInfo = new StorageInfo(type, getVersionFile());
+    } else {
+      this.storageInfo = new StorageInfo(
+          nodeType, StorageInfo.newClusterID(), Time.now());
+      setNodeProperties();
+    }
+  }
+
+  /**
+   * Gets the path of the storage dir.
+   * @return storage dir path
+   */
+  public String getStorageDir() {
+    return storageDir.getAbsoluteFile().toString();
+  }
+
+  /**
+   * Gets the state of the version file.
+   * @return the state of the Version file
+   */
+  public StorageState getState() {
+    return state;
+  }
+
+  public NodeType getNodeType() {
+    return storageInfo.getNodeType();
+  }
+
+  public String getClusterID() {
+    return storageInfo.getClusterID();
+  }
+
+  public long getCreationTime() {
+    return storageInfo.getCreationTime();
+  }
+
+  public void setClusterId(String clusterId) throws IOException {
+    if (state == StorageState.INITIALIZED) {
+      throw new IOException(
+          "Storage directory " + storageDir + " already initialized.");
+    } else {
+      storageInfo.setClusterId(clusterId);
+    }
+  }
+
+  /**
+   * Retrieves the storageInfo instance to read/write the common
+   * version file properties.
+   * @return the instance of the storageInfo class
+   */
+  protected StorageInfo getStorageInfo() {
+    return storageInfo;
+  }
+
+  protected abstract Properties getNodeProperties();
+
+  /**
+   * Sets the node properties specific to KSM/SCM.
+   */
+  private void setNodeProperties() {
+    Properties nodeProperties = getNodeProperties();
+    if (nodeProperties != null) {
+      for (String key : nodeProperties.stringPropertyNames()) {
+        storageInfo.setProperty(key, nodeProperties.getProperty(key));
+      }
+    }
+  }
+
+  /**
+   * Directory {@code current} contains the latest files defining
+   * the file system meta-data.
+   *
+   * @return the directory path
+   */
+  private File getCurrentDir() {
+    return new File(storageDir, STORAGE_DIR_CURRENT);
+  }
+
+  /**
+   * File {@code VERSION} contains the following fields:
+   * <ol>
+   * <li>node type</li>
+   * <li>KSM/SCM state creation time</li>
+   * <li>other fields specific for this node type</li>
+   * </ol>
+   * The version file is always written last during storage directory updates.
+   * The existence of the version file indicates that all other files have
+   * been successfully written in the storage directory, the storage is valid
+   * and does not need to be recovered.
+   *
+   * @return the version file path
+   */
+  private File getVersionFile() {
+    return new File(getCurrentDir(), STORAGE_FILE_VERSION);
+  }
+
+
+  /**
+   * Checks whether the current/ directory is empty. This method is used
+   * before deciding whether to format the directory.
+   * @throws IOException if unable to list files under the directory.
+   */
+  private void checkEmptyCurrent() throws IOException {
+    File currentDir = getCurrentDir();
+    if (!currentDir.exists()) {
+      // if current/ does not exist, it's safe to format it.
+      return;
+    }
+    try (DirectoryStream<Path> dirStream = Files
+        .newDirectoryStream(currentDir.toPath())) {
+      if (dirStream.iterator().hasNext()) {
+        throw new InconsistentStorageStateException(getCurrentDir(),
+            "Can't initialize the storage directory because the current "
+                + "it is not empty.");
+      }
+    }
+  }
+
+  /**
+   * Check consistency of the storage directory.
+   *
+   * @return state {@link StorageState} of the storage directory
+   * @throws IOException
+   */
+  private StorageState getStorageState() throws IOException {
+    assert root != null : "root is null";
+    String rootPath = root.getCanonicalPath();
+    try { // check that storage exists
+      if (!root.exists()) {
+        // storage directory does not exist
+        LOG.warn("Storage directory " + rootPath + " does not exist");
+        return StorageState.NON_EXISTENT;
+      }
+      // or is inaccessible
+      if (!root.isDirectory()) {
+        LOG.warn(rootPath + " is not a directory");
+        return StorageState.NON_EXISTENT;
+      }
+      if (!FileUtil.canWrite(root)) {
+        LOG.warn("Cannot access storage directory " + rootPath);
+        return StorageState.NON_EXISTENT;
+      }
+    } catch (SecurityException ex) {
+      LOG.warn("Cannot access storage directory " + rootPath, ex);
+      return StorageState.NON_EXISTENT;
+    }
+
+    // check whether current directory is valid
+    File versionFile = getVersionFile();
+    boolean hasCurrent = versionFile.exists();
+
+    if (hasCurrent) {
+      return StorageState.INITIALIZED;
+    } else {
+      checkEmptyCurrent();
+      return StorageState.NOT_INITIALIZED;
+    }
+  }
+
+  /**
+   * Creates the Version file if not present,
+   * otherwise throws an IOException.
+   * @throws IOException
+   */
+  public void initialize() throws IOException {
+    if (state == StorageState.INITIALIZED) {
+      throw new IOException("Storage directory already initialized.");
+    }
+    if (!getCurrentDir().mkdirs()) {
+      throw new IOException("Cannot create directory " + getCurrentDir());
+    }
+    storageInfo.writeTo(getVersionFile());
+  }
+
+}
+

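A minimal sketch (not part of the patch) of a concrete Storage subclass,
assuming HddsProtos.NodeType defines an SCM constant as the class Javadoc
suggests; the directory name and the "scmUuid" property are purely
illustrative:

import java.io.File;
import java.io.IOException;
import java.util.Properties;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.ozone.common.Storage;

public class ExampleScmStorage extends Storage {

  public ExampleScmStorage(File root) throws IOException {
    // "scm" becomes <root>/scm; the VERSION file lives in <root>/scm/current.
    super(NodeType.SCM, root, "scm");
  }

  @Override
  protected Properties getNodeProperties() {
    // Node-specific properties merged into the VERSION file on initialize().
    Properties props = new Properties();
    props.setProperty("scmUuid", "illustrative-value");
    return props;
  }
}

A caller would typically check getState() and invoke initialize() only when it
returns NOT_INITIALIZED, matching the VERSION-file contract described above.
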
http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
new file mode 100644
index 0000000..0e98a4c
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Properties;
+import java.util.UUID;
+
+/**
+ * Common class for storage information. This class defines the common
+ * properties and the functions to set them, write them to the version file,
+ * and read them back from the version file.
+ *
+ */
+@InterfaceAudience.Private
+public class StorageInfo {
+
+  private Properties properties = new Properties();
+
+  /**
+   * Property to hold node type.
+   */
+  private static final String NODE_TYPE = "nodeType";
+  /**
+   * Property to hold ID of the cluster.
+   */
+  private static final String CLUSTER_ID = "clusterID";
+  /**
+   * Property to hold creation time of the storage.
+   */
+  private static final String CREATION_TIME = "cTime";
+
+  /**
+   * Constructs StorageInfo instance.
+   * @param type
+   *          Type of the node using the storage
+   * @param cid
+   *          Cluster ID
+   * @param cT
+   *          Cluster creation time
+   * @throws IOException
+   */
+  public StorageInfo(NodeType type, String cid, long cT)
+      throws IOException {
+    Preconditions.checkNotNull(type);
+    Preconditions.checkNotNull(cid);
+    Preconditions.checkNotNull(cT);
+    properties.setProperty(NODE_TYPE, type.name());
+    properties.setProperty(CLUSTER_ID, cid);
+    properties.setProperty(CREATION_TIME, String.valueOf(cT));
+  }
+
+  public StorageInfo(NodeType type, File propertiesFile)
+      throws IOException {
+    this.properties = readFrom(propertiesFile);
+    verifyNodeType(type);
+    verifyClusterId();
+    verifyCreationTime();
+  }
+
+  public NodeType getNodeType() {
+    return NodeType.valueOf(properties.getProperty(NODE_TYPE));
+  }
+
+  public String getClusterID() {
+    return properties.getProperty(CLUSTER_ID);
+  }
+
+  public Long getCreationTime() {
+    String creationTime = properties.getProperty(CREATION_TIME);
+    if (creationTime != null) {
+      return Long.parseLong(creationTime);
+    }
+    return null;
+  }
+
+  public String getProperty(String key) {
+    return properties.getProperty(key);
+  }
+
+  public void setProperty(String key, String value) {
+    properties.setProperty(key, value);
+  }
+
+  public void setClusterId(String clusterId) {
+    properties.setProperty(CLUSTER_ID, clusterId);
+  }
+
+  private void verifyNodeType(NodeType type)
+      throws InconsistentStorageStateException {
+    NodeType nodeType = getNodeType();
+    Preconditions.checkNotNull(nodeType);
+    if (type != nodeType) {
+      throw new InconsistentStorageStateException("Expected NodeType: " + type
+          + ", but found: " + nodeType);
+    }
+  }
+
+  private void verifyClusterId()
+      throws InconsistentStorageStateException {
+    String clusterId = getClusterID();
+    Preconditions.checkNotNull(clusterId);
+    if (clusterId.isEmpty()) {
+      throw new InconsistentStorageStateException("Cluster ID not found");
+    }
+  }
+
+  private void verifyCreationTime() {
+    Long creationTime = getCreationTime();
+    Preconditions.checkNotNull(creationTime);
+  }
+
+
+  public void writeTo(File to)
+      throws IOException {
+    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
+         FileOutputStream out = new FileOutputStream(file.getFD())) {
+      file.seek(0);
+      /*
+       * If the server is interrupted before this line,
+       * the version file will remain unchanged.
+       */
+      properties.store(out, null);
+      /*
+       * Now the new fields are flushed to the head of the file, but the file
+       * length can still be larger than required and therefore the file can
+       * contain whole or corrupted fields from its old contents at the end.
+       * If the server is interrupted here and restarted later, these extra
+       * fields either should not affect server behavior or should be handled
+       * by the server correctly.
+       */
+      file.setLength(out.getChannel().position());
+    }
+  }
+
+  private Properties readFrom(File from) throws IOException {
+    try (RandomAccessFile file = new RandomAccessFile(from, "rws");
+        FileInputStream in = new FileInputStream(file.getFD())) {
+      Properties props = new Properties();
+      file.seek(0);
+      props.load(in);
+      return props;
+    }
+  }
+
+  /**
+   * Generate new clusterID.
+   *
+   * clusterID is a persistent attribute of the cluster.
+   * It is generated when the cluster is created and remains the same
+   * during the life cycle of the cluster.  When a new SCM node is initialized,
+   * if this is a new cluster, a new clusterID is generated and stored.
+   * @return new clusterID
+   */
+  public static String newClusterID() {
+    return "CID-" + UUID.randomUUID().toString();
+  }
+
+}

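A minimal sketch (not part of the patch) of writing a VERSION file with
StorageInfo and reading it back; the file location is hypothetical:

import java.io.File;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.ozone.common.StorageInfo;
import org.apache.hadoop.util.Time;

public final class VersionFileExample {
  public static void main(String[] args) throws Exception {
    File versionFile = new File("/tmp/ozone/scm/current/VERSION");
    versionFile.getParentFile().mkdirs();

    // In-memory info with a freshly generated cluster id.
    StorageInfo info =
        new StorageInfo(NodeType.SCM, StorageInfo.newClusterID(), Time.now());
    info.writeTo(versionFile);

    // Re-reading verifies the node type, cluster id and creation time.
    StorageInfo onDisk = new StorageInfo(NodeType.SCM, versionFile);
    System.out.println("clusterID = " + onDisk.getClusterID());
  }
}
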
http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
new file mode 100644
index 0000000..6517e58
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
new file mode 100644
index 0000000..9aeff24
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.common.statemachine;
+
+/**
+ * Exception thrown on an invalid state transition in a state machine.
+ */
+public class InvalidStateTransitionException extends Exception {
+  private final Enum<?> currentState;
+  private final Enum<?> event;
+
+  public InvalidStateTransitionException(Enum<?> currentState, Enum<?> event) {
+    super("Invalid event: " + event + " at " + currentState + " state.");
+    this.currentState = currentState;
+    this.event = event;
+  }
+
+  public Enum<?> getCurrentState() {
+    return currentState;
+  }
+
+  public Enum<?> getEvent() {
+    return event;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
new file mode 100644
index 0000000..bf8cbd5
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.common.statemachine;
+
+import com.google.common.base.Supplier;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Template class that wraps a simple event-driven state machine.
+ * @param <STATE> states allowed
+ * @param <EVENT> events allowed
+ */
+public class StateMachine<STATE extends Enum<?>, EVENT extends Enum<?>> {
+  private STATE initialState;
+  private Set<STATE> finalStates;
+
+  private final LoadingCache<EVENT, Map<STATE, STATE>> transitions =
+      CacheBuilder.newBuilder().build(
+          CacheLoader.from((Supplier<Map<STATE, STATE>>) () -> new HashMap<>()));
+
+  public StateMachine(STATE initState, Set<STATE> finalStates) {
+    this.initialState = initState;
+    this.finalStates = finalStates;
+  }
+
+  public STATE getInitialState() {
+    return initialState;
+  }
+
+  public Set<STATE> getFinalStates() {
+    return finalStates;
+  }
+
+  public STATE getNextState(STATE from, EVENT e)
+      throws InvalidStateTransitionException {
+    STATE target = transitions.getUnchecked(e).get(from);
+    if (target == null) {
+      throw new InvalidStateTransitionException(from, e);
+    }
+    return target;
+  }
+
+  public void addTransition(STATE from, STATE to, EVENT e) {
+    transitions.getUnchecked(e).put(from, to);
+  }
+}
\ No newline at end of file

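A minimal sketch (not part of the patch) of driving the StateMachine template
with two hypothetical enums:

import java.util.EnumSet;
import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
import org.apache.hadoop.ozone.common.statemachine.StateMachine;

public final class StateMachineExample {
  private enum LifeCycle { OPEN, CLOSED }
  private enum Event { CLOSE }

  public static void main(String[] args)
      throws InvalidStateTransitionException {
    StateMachine<LifeCycle, Event> machine =
        new StateMachine<>(LifeCycle.OPEN, EnumSet.of(LifeCycle.CLOSED));
    machine.addTransition(LifeCycle.OPEN, LifeCycle.CLOSED, Event.CLOSE);

    // OPEN + CLOSE -> CLOSED.
    System.out.println(machine.getNextState(LifeCycle.OPEN, Event.CLOSE));

    // CLOSED has no transition registered for CLOSE, so this throws
    // InvalidStateTransitionException.
    machine.getNextState(LifeCycle.CLOSED, Event.CLOSE);
  }
}
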
http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
new file mode 100644
index 0000000..045409e
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common.statemachine;
+/**
+ * State machine template classes for Ozone.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
new file mode 100644
index 0000000..aa1fe74
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * Java class that represents the ChunkInfo ProtoBuf class. This helper class
+ * allows us to convert between the protobuf representation and plain Java
+ * objects.
+ */
+public class ChunkInfo {
+  private final String chunkName;
+  private final long offset;
+  private final long len;
+  private String checksum;
+  private final Map<String, String> metadata;
+
+
+  /**
+   * Constructs a ChunkInfo.
+   *
+   * @param chunkName - File Name where chunk lives.
+   * @param offset    - offset where Chunk Starts.
+   * @param len       - Length of the Chunk.
+   */
+  public ChunkInfo(String chunkName, long offset, long len) {
+    this.chunkName = chunkName;
+    this.offset = offset;
+    this.len = len;
+    this.metadata = new TreeMap<>();
+  }
+
+  /**
+   * Adds metadata.
+   *
+   * @param key   - Key Name.
+   * @param value - Value.
+   * @throws IOException
+   */
+  public void addMetadata(String key, String value) throws IOException {
+    synchronized (this.metadata) {
+      if (this.metadata.containsKey(key)) {
+        throw new IOException("This key already exists. Key " + key);
+      }
+      metadata.put(key, value);
+    }
+  }
+
+  /**
+   * Gets a ChunkInfo instance from the protobuf definition.
+   *
+   * @param info - Protobuf class
+   * @return ChunkInfo
+   * @throws IOException
+   */
+  public static ChunkInfo getFromProtoBuf(ContainerProtos.ChunkInfo info)
+      throws IOException {
+    Preconditions.checkNotNull(info);
+
+    ChunkInfo chunkInfo = new ChunkInfo(info.getChunkName(), info.getOffset(),
+        info.getLen());
+
+    for (int x = 0; x < info.getMetadataCount(); x++) {
+      chunkInfo.addMetadata(info.getMetadata(x).getKey(),
+          info.getMetadata(x).getValue());
+    }
+
+
+    if (info.hasChecksum()) {
+      chunkInfo.setChecksum(info.getChecksum());
+    }
+    return chunkInfo;
+  }
+
+  /**
+   * Returns a ProtoBuf Message from ChunkInfo.
+   *
+   * @return Protocol Buffer Message
+   */
+  public ContainerProtos.ChunkInfo getProtoBufMessage() {
+    ContainerProtos.ChunkInfo.Builder builder = ContainerProtos
+        .ChunkInfo.newBuilder();
+
+    builder.setChunkName(this.getChunkName());
+    builder.setOffset(this.getOffset());
+    builder.setLen(this.getLen());
+    if (this.getChecksum() != null && !this.getChecksum().isEmpty()) {
+      builder.setChecksum(this.getChecksum());
+    }
+
+    for (Map.Entry<String, String> entry : metadata.entrySet()) {
+      HddsProtos.KeyValue.Builder keyValBuilder =
+          HddsProtos.KeyValue.newBuilder();
+      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
+          .setValue(entry.getValue()).build());
+    }
+
+    return builder.build();
+  }
+
+  /**
+   * Returns the chunkName.
+   *
+   * @return - String
+   */
+  public String getChunkName() {
+    return chunkName;
+  }
+
+  /**
+   * Gets the start offset of the given chunk in the physical file.
+   *
+   * @return - long
+   */
+  public long getOffset() {
+    return offset;
+  }
+
+  /**
+   * Returns the length of the Chunk.
+   *
+   * @return long
+   */
+  public long getLen() {
+    return len;
+  }
+
+  /**
+   * Returns the SHA256 value of this chunk.
+   *
+   * @return - Hash String
+   */
+  public String getChecksum() {
+    return checksum;
+  }
+
+  /**
+   * Sets the Hash value of this chunk.
+   *
+   * @param checksum - Hash String.
+   */
+  public void setChecksum(String checksum) {
+    this.checksum = checksum;
+  }
+
+  /**
+   * Returns Metadata associated with this Chunk.
+   *
+   * @return - Map of Key,values.
+   */
+  public Map<String, String> getMetadata() {
+    return metadata;
+  }
+
+  @Override
+  public String toString() {
+    return "ChunkInfo{" +
+        "chunkName='" + chunkName +
+        ", offset=" + offset +
+        ", len=" + len +
+        '}';
+  }
+}

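A minimal sketch (not part of the patch) of the round trip between the
ChunkInfo helper and its protobuf form; the chunk name, length and checksum
are hypothetical:

import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

public final class ChunkInfoExample {
  public static void main(String[] args) throws Exception {
    ChunkInfo chunk = new ChunkInfo("key1_chunk_1", 0, 4096);
    chunk.addMetadata("createdBy", "example");   // throws on duplicate keys
    chunk.setChecksum("hypothetical-checksum");

    // Helper -> protobuf -> helper round trip.
    ContainerProtos.ChunkInfo proto = chunk.getProtoBufMessage();
    ChunkInfo restored = ChunkInfo.getFromProtoBuf(proto);
    System.out.println(restored);   // uses ChunkInfo#toString
  }
}
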
http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
new file mode 100644
index 0000000..be546c7
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * Helper class to convert Protobuf to Java classes.
+ */
+public class KeyData {
+  private final String containerName;
+  private final String keyName;
+  private final Map<String, String> metadata;
+
+  /**
+   * Please note: when we are working with keys, we don't care what they point
+   * to. We neither read nor validate the chunk info; that is the
+   * responsibility of higher layers like Ozone. We just read and write data
+   * from the network.
+   */
+  private List<ContainerProtos.ChunkInfo> chunks;
+
+  /**
+   * Constructs a KeyData Object.
+   *
+   * @param containerName - Name of the container this key belongs to.
+   * @param keyName - Name of the key.
+   */
+  public KeyData(String containerName, String keyName) {
+    this.containerName = containerName;
+    this.keyName = keyName;
+    this.metadata = new TreeMap<>();
+  }
+
+  /**
+   * Returns a keyData object from the protobuf data.
+   *
+   * @param data - Protobuf data.
+   * @return - KeyData
+   * @throws IOException
+   */
+  public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws
+      IOException {
+    KeyData keyData = new KeyData(data.getContainerName(), data.getName());
+    for (int x = 0; x < data.getMetadataCount(); x++) {
+      keyData.addMetadata(data.getMetadata(x).getKey(),
+          data.getMetadata(x).getValue());
+    }
+    keyData.setChunks(data.getChunksList());
+    return keyData;
+  }
+
+  /**
+   * Returns a Protobuf message from KeyData.
+   * @return Proto Buf Message.
+   */
+  public ContainerProtos.KeyData getProtoBufMessage() {
+    ContainerProtos.KeyData.Builder builder =
+        ContainerProtos.KeyData.newBuilder();
+    builder.setContainerName(this.containerName);
+    builder.setName(this.getKeyName());
+    builder.addAllChunks(this.chunks);
+    for (Map.Entry<String, String> entry : metadata.entrySet()) {
+      HddsProtos.KeyValue.Builder keyValBuilder =
+          HddsProtos.KeyValue.newBuilder();
+      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
+          .setValue(entry.getValue()).build());
+    }
+    return builder.build();
+  }
+
+  /**
+   * Adds metadata.
+   *
+   * @param key   - Key
+   * @param value - Value
+   * @throws IOException
+   */
+  public synchronized void addMetadata(String key, String value) throws
+      IOException {
+    if (this.metadata.containsKey(key)) {
+      throw new IOException("This key already exists. Key " + key);
+    }
+    metadata.put(key, value);
+  }
+
+  public synchronized Map<String, String> getMetadata() {
+    return Collections.unmodifiableMap(this.metadata);
+  }
+
+  /**
+   * Returns value of a key.
+   */
+  public synchronized String getValue(String key) {
+    return metadata.get(key);
+  }
+
+  /**
+   * Deletes a metadata entry from the map.
+   *
+   * @param key - Key
+   */
+  public synchronized void deleteKey(String key) {
+    metadata.remove(key);
+  }
+
+  /**
+   * Returns chunks list.
+   *
+   * @return list of chunkinfo.
+   */
+  public List<ContainerProtos.ChunkInfo> getChunks() {
+    return chunks;
+  }
+
+  /**
+   * Returns container Name.
+   * @return String.
+   */
+  public String getContainerName() {
+    return containerName;
+  }
+
+  /**
+   * Returns KeyName.
+   * @return String.
+   */
+  public String getKeyName() {
+    return keyName;
+  }
+
+  /**
+   * Sets Chunk list.
+   *
+   * @param chunks - List of chunks.
+   */
+  public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
+    this.chunks = chunks;
+  }
+
+  /**
+   * Get the total size of chunks allocated for the key.
+   * @return total size of the key.
+   */
+  public long getSize() {
+    return chunks.parallelStream().mapToLong(e -> e.getLen()).sum();
+  }
+
+}


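A minimal sketch (not part of the patch) showing how KeyData carries an opaque
chunk list and converts to and from protobuf; the container and key names are
hypothetical:

import java.util.Collections;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;

public final class KeyDataExample {
  public static void main(String[] args) throws Exception {
    KeyData keyData = new KeyData("container1", "key1");
    keyData.addMetadata("owner", "example");

    // KeyData does not interpret chunks; it just stores the protobuf entries.
    ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder()
        .setChunkName("key1_chunk_1").setOffset(0).setLen(1024).build();
    keyData.setChunks(Collections.singletonList(chunk));

    System.out.println("total size = " + keyData.getSize());   // 1024
    KeyData restored =
        KeyData.getFromProtoBuf(keyData.getProtoBufMessage());
    System.out.println(restored.getKeyName());
  }
}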