http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 4c465d3..41ceee4 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -21,6 +21,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -32,7 +34,6 @@ import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
 import 
org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.hdds.scm.protocolPB
@@ -162,30 +163,34 @@ public class ChunkGroupOutputStream extends OutputStream {
 
   private void checkKeyLocationInfo(KsmKeyLocationInfo subKeyInfo)
       throws IOException {
-    String containerKey = subKeyInfo.getBlockID();
-    String containerName = subKeyInfo.getContainerName();
-    Pipeline pipeline = scmClient.getContainer(containerName);
+    ContainerInfo container = scmClient.getContainer(
+        subKeyInfo.getContainerID());
     XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(pipeline);
+        xceiverClientManager.acquireClient(container.getPipeline(),
+            container.getContainerID());
     // create container if needed
     if (subKeyInfo.getShouldCreateContainer()) {
       try {
-        ContainerProtocolCalls.createContainer(xceiverClient, requestID);
+        ContainerProtocolCalls.createContainer(xceiverClient,
+            container.getContainerID(), requestID);
         scmClient.notifyObjectStageChange(
             ObjectStageChangeRequestProto.Type.container,
-            containerName, ObjectStageChangeRequestProto.Op.create,
+            subKeyInfo.getContainerID(),
+            ObjectStageChangeRequestProto.Op.create,
             ObjectStageChangeRequestProto.Stage.complete);
       } catch (StorageContainerException ex) {
         if (ex.getResult().equals(Result.CONTAINER_EXISTS)) {
           //container already exist, this should never happen
-          LOG.debug("Container {} already exists.", containerName);
+          LOG.debug("Container {} already exists.",
+              container.getContainerID());
         } else {
-          LOG.error("Container creation failed for {}.", containerName, ex);
+          LOG.error("Container creation failed for {}.",
+              container.getContainerID(), ex);
           throw ex;
         }
       }
     }
-    streamEntries.add(new ChunkOutputStreamEntry(containerKey,
+    streamEntries.add(new ChunkOutputStreamEntry(subKeyInfo.getBlockID(),
         keyArgs.getKeyName(), xceiverClientManager, xceiverClient, requestID,
         chunkSize, subKeyInfo.getLength()));
   }
@@ -390,7 +395,7 @@ public class ChunkGroupOutputStream extends OutputStream {
 
   private static class ChunkOutputStreamEntry extends OutputStream {
     private OutputStream outputStream;
-    private final String containerKey;
+    private final BlockID blockID;
     private final String key;
     private final XceiverClientManager xceiverClientManager;
     private final XceiverClientSpi xceiverClient;
@@ -401,12 +406,12 @@ public class ChunkGroupOutputStream extends OutputStream {
     // the current position of this stream 0 <= currentPosition < length
     private long currentPosition;
 
-    ChunkOutputStreamEntry(String containerKey, String key,
+    ChunkOutputStreamEntry(BlockID blockID, String key,
         XceiverClientManager xceiverClientManager,
         XceiverClientSpi xceiverClient, String requestId, int chunkSize,
         long length) {
       this.outputStream = null;
-      this.containerKey = containerKey;
+      this.blockID = blockID;
       this.key = key;
       this.xceiverClientManager = xceiverClientManager;
       this.xceiverClient = xceiverClient;
@@ -424,7 +429,7 @@ public class ChunkGroupOutputStream extends OutputStream {
      */
     ChunkOutputStreamEntry(OutputStream outputStream, long length) {
       this.outputStream = outputStream;
-      this.containerKey = null;
+      this.blockID = null;
       this.key = null;
       this.xceiverClientManager = null;
       this.xceiverClient = null;
@@ -445,7 +450,7 @@ public class ChunkGroupOutputStream extends OutputStream {
 
     private synchronized void checkStream() {
       if (this.outputStream == null) {
-        this.outputStream = new ChunkOutputStream(containerKey,
+        this.outputStream = new ChunkOutputStream(blockID,
             key, xceiverClientManager, xceiverClient,
             requestId, chunkSize);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
index bf9e80f..2132bc8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.client.io;
 
 
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.client.BlockID;
+
 
 /**
  * This class contains methods that define the translation between the Ozone
@@ -30,16 +32,13 @@ final class OzoneContainerTranslation {
   /**
    * Creates key data intended for reading a container key.
    *
-   * @param containerName container name
-   * @param containerKey container key
+   * @param blockID - ID of the block.
    * @return KeyData intended for reading the container key
    */
-  public static KeyData containerKeyDataForRead(String containerName,
-      String containerKey) {
+  public static KeyData containerKeyDataForRead(BlockID blockID) {
     return KeyData
         .newBuilder()
-        .setContainerName(containerName)
-        .setName(containerKey)
+        .setBlockID(blockID.getProtobuf())
         .build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
index 9d24b30..45feda0 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.ksm.helpers;
 
+import org.apache.hadoop.hdds.client.BlockID;
 import 
org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyLocation;
 
 /**
@@ -23,9 +24,7 @@ import 
org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyL
  * into a number of subkeys. This class represents one such subkey instance.
  */
 public final class KsmKeyLocationInfo {
-  private final String containerName;
-  // name of the block id SCM assigned for the key
-  private final String blockID;
+  private final BlockID blockID;
   private final boolean shouldCreateContainer;
   // the id of this subkey in all the subkeys.
   private final long length;
@@ -33,10 +32,8 @@ public final class KsmKeyLocationInfo {
   // the version number indicating when this block was added
   private long createVersion;
 
-  private KsmKeyLocationInfo(String containerName,
-      String blockID, boolean shouldCreateContainer,
+  private KsmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer,
       long length, long offset) {
-    this.containerName = containerName;
     this.blockID = blockID;
     this.shouldCreateContainer = shouldCreateContainer;
     this.length = length;
@@ -51,12 +48,16 @@ public final class KsmKeyLocationInfo {
     return createVersion;
   }
 
-  public String getContainerName() {
-    return containerName;
+  public BlockID getBlockID() {
+    return blockID;
   }
 
-  public String getBlockID() {
-    return blockID;
+  public long getContainerID() {
+    return blockID.getContainerID();
+  }
+
+  public long getLocalID() {
+    return blockID.getLocalID();
   }
 
   public boolean getShouldCreateContainer() {
@@ -75,19 +76,13 @@ public final class KsmKeyLocationInfo {
    * Builder of KsmKeyLocationInfo.
    */
   public static class Builder {
-    private String containerName;
-    private String blockID;
+    private BlockID blockID;
     private boolean shouldCreateContainer;
     private long length;
     private long offset;
 
-    public Builder setContainerName(String container) {
-      this.containerName = container;
-      return this;
-    }
-
-    public Builder setBlockID(String block) {
-      this.blockID = block;
+    public Builder setBlockID(BlockID blockId) {
+      this.blockID = blockId;
       return this;
     }
 
@@ -107,15 +102,14 @@ public final class KsmKeyLocationInfo {
     }
 
     public KsmKeyLocationInfo build() {
-      return new KsmKeyLocationInfo(containerName, blockID,
+      return new KsmKeyLocationInfo(blockID,
           shouldCreateContainer, length, offset);
     }
   }
 
   public KeyLocation getProtobuf() {
     return KeyLocation.newBuilder()
-        .setContainerName(containerName)
-        .setBlockID(blockID)
+        .setBlockID(blockID.getProtobuf())
         .setShouldCreateContainer(shouldCreateContainer)
         .setLength(length)
         .setOffset(offset)
@@ -125,8 +119,7 @@ public final class KsmKeyLocationInfo {
 
   public static KsmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
     KsmKeyLocationInfo info = new KsmKeyLocationInfo(
-        keyLocation.getContainerName(),
-        keyLocation.getBlockID(),
+        BlockID.getFromProtobuf(keyLocation.getBlockID()),
         keyLocation.getShouldCreateContainer(),
         keyLocation.getLength(),
         keyLocation.getOffset());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
index bef65ec..0facf3c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
@@ -111,7 +111,7 @@ public class KsmKeyLocationInfoGroup {
     StringBuilder sb = new StringBuilder();
     sb.append("version:").append(version).append(" ");
     for (KsmKeyLocationInfo kli : locationList) {
-      sb.append(kli.getBlockID()).append(" || ");
+      sb.append(kli.getLocalID()).append(" || ");
     }
     return sb.toString();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java
index 07856d0..8a75928 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Request;
 import javax.ws.rs.core.UriInfo;
+import java.util.Arrays;
 
 /**
  * UserArgs is used to package caller info
@@ -118,7 +119,8 @@ public class UserArgs {
    * @return String[]
    */
   public String[] getGroups() {
-    return this.groups;
+    return groups != null ?
+        Arrays.copyOf(groups, groups.length) : null;
   }
 
   /**
@@ -127,7 +129,9 @@ public class UserArgs {
    * @param groups list of groups
    */
   public void setGroups(String[] groups) {
-    this.groups = groups;
+    if (groups != null) {
+      this.groups = Arrays.copyOf(groups, groups.length);
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
index a6026f1..405c5b0 100644
--- a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
@@ -230,13 +230,12 @@ message KeyArgs {
 }
 
 message KeyLocation {
-    required string blockID = 1;
-    required string containerName = 2;
-    required bool shouldCreateContainer = 3;
-    required uint64 offset = 4;
-    required uint64 length = 5;
+    required hadoop.hdds.BlockID blockID = 1;
+    required bool shouldCreateContainer = 2;
+    required uint64 offset = 3;
+    required uint64 length = 4;
     // indicated at which version this block gets created.
-    optional uint64 createVersion = 6;
+    optional uint64 createVersion = 5;
 }
 
 message KeyLocationList {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index f745788..bedd5c4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -16,7 +16,7 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.commons.lang.RandomStringUtils;
+import com.google.common.primitives.Longs;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -31,6 +31,8 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.NavigableSet;
 import java.util.Random;
 
@@ -69,15 +71,14 @@ public class TestContainerStateManager {
   @Test
   public void testAllocateContainer() throws IOException {
     // Allocate a container and verify the container info
-    String container1 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.getClientProtocolServer().allocateContainer(
+    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
         xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), container1, containerOwner);
+        xceiverClientManager.getFactor(), containerOwner);
     ContainerInfo info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED);
-    Assert.assertEquals(container1, info.getContainerName());
+    Assert.assertEquals(container1.getContainerID(), info.getContainerID());
     Assert.assertEquals(OzoneConsts.GB * 3, info.getAllocatedBytes());
     Assert.assertEquals(containerOwner, info.getOwner());
     Assert.assertEquals(xceiverClientManager.getType(),
@@ -87,28 +88,31 @@ public class TestContainerStateManager {
     Assert.assertEquals(HddsProtos.LifeCycleState.ALLOCATED, info.getState());
 
     // Check there are two containers in ALLOCATED state after allocation
-    String container2 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.getClientProtocolServer().allocateContainer(
+    ContainerInfo container2 = scm.getClientProtocolServer().allocateContainer(
         xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), container2, containerOwner);
+        xceiverClientManager.getFactor(), containerOwner);
     int numContainers = containerStateManager
         .getMatchingContainerIDs(containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED).size();
+    Assert.assertNotEquals(container1.getContainerID(), container2.getContainerID());
     Assert.assertEquals(2, numContainers);
   }
 
   @Test
   public void testContainerStateManagerRestart() throws IOException {
     // Allocate 5 containers in ALLOCATED state and 5 in CREATING state
-    String cname = "container" + RandomStringUtils.randomNumeric(5);
+
+    List<ContainerInfo> containers = new ArrayList<>();
     for (int i = 0; i < 10; i++) {
-      scm.getClientProtocolServer().allocateContainer(
+      ContainerInfo container = scm.getClientProtocolServer().allocateContainer(
           xceiverClientManager.getType(),
-          xceiverClientManager.getFactor(), cname + i, containerOwner);
+          xceiverClientManager.getFactor(), containerOwner);
+      containers.add(container);
       if (i >= 5) {
         scm.getScmContainerManager()
-            .updateContainerState(cname + i, HddsProtos.LifeCycleEvent.CREATE);
+            .updateContainerState(container.getContainerID(),
+                HddsProtos.LifeCycleEvent.CREATE);
       }
     }
 
@@ -117,48 +121,46 @@ public class TestContainerStateManager {
     ContainerStateManager stateManager =
         new ContainerStateManager(conf, scmContainerMapping
         );
-    int containers = stateManager
+    int matchCount = stateManager
         .getMatchingContainerIDs(containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED).size();
-    Assert.assertEquals(5, containers);
-    containers = stateManager.getMatchingContainerIDs(containerOwner,
+    Assert.assertEquals(5, matchCount);
+    matchCount = stateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.CREATING).size();
-    Assert.assertEquals(5, containers);
+    Assert.assertEquals(5, matchCount);
   }
 
   @Test
   public void testGetMatchingContainer() throws IOException {
-    String container1 = "container-01234";
-    scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), container1, containerOwner);
-    scmContainerMapping.updateContainerState(container1,
+    ContainerInfo container1 = scm.getClientProtocolServer().
+        allocateContainer(xceiverClientManager.getType(),
+        xceiverClientManager.getFactor(), containerOwner);
+    scmContainerMapping.updateContainerState(container1.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container1,
+    scmContainerMapping.updateContainerState(container1.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);
 
-    String container2 = "container-56789";
-    scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), container2, containerOwner);
+    ContainerInfo container2 = scm.getClientProtocolServer().
+        allocateContainer(xceiverClientManager.getType(),
+        xceiverClientManager.getFactor(), containerOwner);
 
     ContainerInfo info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.OPEN);
-    Assert.assertEquals(container1, info.getContainerName());
+    Assert.assertEquals(container1.getContainerID(), info.getContainerID());
 
     info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED);
-    Assert.assertEquals(container2, info.getContainerName());
+    Assert.assertEquals(container2.getContainerID(), info.getContainerID());
 
-    scmContainerMapping.updateContainerState(container2,
+    scmContainerMapping.updateContainerState(container2.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container2,
+    scmContainerMapping.updateContainerState(container2.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);
 
     // space has already been allocated in container1, now container 2 should
@@ -167,7 +169,7 @@ public class TestContainerStateManager {
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.OPEN);
-    Assert.assertEquals(container2, info.getContainerName());
+    Assert.assertEquals(container2.getContainerID(), info.getContainerID());
   }
 
   @Test
@@ -181,23 +183,22 @@ public class TestContainerStateManager {
 
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED
-    String container1 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.getClientProtocolServer().allocateContainer(
+    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
         xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), container1, containerOwner);
+        xceiverClientManager.getFactor(), containerOwner);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.ALLOCATED).size();
     Assert.assertEquals(1, containers);
 
-    scmContainerMapping.updateContainerState(container1,
+    scmContainerMapping.updateContainerState(container1.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.CREATING).size();
     Assert.assertEquals(1, containers);
 
-    scmContainerMapping.updateContainerState(container1,
+    scmContainerMapping.updateContainerState(container1.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -205,28 +206,32 @@ public class TestContainerStateManager {
     Assert.assertEquals(1, containers);
 
     scmContainerMapping
-        .updateContainerState(container1, HddsProtos.LifeCycleEvent.FINALIZE);
+        .updateContainerState(container1.getContainerID(),
+            HddsProtos.LifeCycleEvent.FINALIZE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.CLOSING).size();
     Assert.assertEquals(1, containers);
 
     scmContainerMapping
-        .updateContainerState(container1, HddsProtos.LifeCycleEvent.CLOSE);
+        .updateContainerState(container1.getContainerID(),
+            HddsProtos.LifeCycleEvent.CLOSE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.CLOSED).size();
     Assert.assertEquals(1, containers);
 
     scmContainerMapping
-        .updateContainerState(container1, HddsProtos.LifeCycleEvent.DELETE);
+        .updateContainerState(container1.getContainerID(),
+            HddsProtos.LifeCycleEvent.DELETE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.DELETING).size();
     Assert.assertEquals(1, containers);
 
     scmContainerMapping
-        .updateContainerState(container1, HddsProtos.LifeCycleEvent.CLEANUP);
+        .updateContainerState(container1.getContainerID(),
+            HddsProtos.LifeCycleEvent.CLEANUP);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.DELETED).size();
@@ -234,14 +239,14 @@ public class TestContainerStateManager {
 
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // DELETING
-    String container2 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.getClientProtocolServer().allocateContainer(
+    ContainerInfo container2 = scm.getClientProtocolServer().allocateContainer(
         xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), container2, containerOwner);
-    scmContainerMapping.updateContainerState(container2,
+        xceiverClientManager.getFactor(), containerOwner);
+    scmContainerMapping.updateContainerState(container2.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
     scmContainerMapping
-        .updateContainerState(container2, HddsProtos.LifeCycleEvent.TIMEOUT);
+        .updateContainerState(container2.getContainerID(),
+            HddsProtos.LifeCycleEvent.TIMEOUT);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.DELETING).size();
@@ -249,18 +254,18 @@ public class TestContainerStateManager {
 
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // OPEN -> CLOSING -> CLOSED
-    String container3 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.getClientProtocolServer().allocateContainer(
+    ContainerInfo container3 = scm.getClientProtocolServer().allocateContainer(
         xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), container3, containerOwner);
-    scmContainerMapping.updateContainerState(container3,
+        xceiverClientManager.getFactor(), containerOwner);
+    scmContainerMapping.updateContainerState(container3.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container3,
+    scmContainerMapping.updateContainerState(container3.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);
-    scmContainerMapping.updateContainerState(container3,
+    scmContainerMapping.updateContainerState(container3.getContainerID(),
         HddsProtos.LifeCycleEvent.FINALIZE);
     scmContainerMapping
-        .updateContainerState(container3, HddsProtos.LifeCycleEvent.CLOSE);
+        .updateContainerState(container3.getContainerID(),
+            HddsProtos.LifeCycleEvent.CLOSE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.CLOSED).size();
@@ -269,13 +274,12 @@ public class TestContainerStateManager {
 
   @Test
   public void testUpdatingAllocatedBytes() throws Exception {
-    String container1 = "container" + RandomStringUtils.randomNumeric(5);
-    scm.getClientProtocolServer().allocateContainer(
+    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
         xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), container1, containerOwner);
-    scmContainerMapping.updateContainerState(container1,
+        xceiverClientManager.getFactor(), containerOwner);
+    scmContainerMapping.updateContainerState(container1.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container1,
+    scmContainerMapping.updateContainerState(container1.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);
 
     Random ran = new Random();
@@ -288,7 +292,7 @@ public class TestContainerStateManager {
           .getMatchingContainer(size, containerOwner,
               xceiverClientManager.getType(), xceiverClientManager.getFactor(),
               HddsProtos.LifeCycleState.OPEN);
-      Assert.assertEquals(container1, info.getContainerName());
+      Assert.assertEquals(container1.getContainerID(), info.getContainerID());
 
       ContainerMapping containerMapping =
           (ContainerMapping)scmContainerMapping;
@@ -296,10 +300,10 @@ public class TestContainerStateManager {
       // to disk
       containerMapping.flushContainerInfo();
 
-      Charset utf8 = Charset.forName("UTF-8");
       // the persisted value should always be equal to allocated size.
       byte[] containerBytes =
-          containerMapping.getContainerStore().get(container1.getBytes(utf8));
+          containerMapping.getContainerStore().get(
+              Longs.toByteArray(container1.getContainerID()));
       HddsProtos.SCMContainerInfo infoProto =
           HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes);
       ContainerInfo currentInfo = ContainerInfo.fromProtobuf(infoProto);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
index 20579fd..d4c9d4f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -77,11 +78,12 @@ public class TestContainerOperations {
    */
   @Test
   public void testCreate() throws Exception {
-    Pipeline pipeline0 = storageClient.createContainer(HddsProtos
+    ContainerInfo container = storageClient.createContainer(HddsProtos
         .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor
-        .ONE, "container0", "OZONE");
-    assertEquals("container0", pipeline0.getContainerName());
-
+        .ONE, "OZONE");
+    assertEquals(container.getContainerID(),
+        storageClient.getContainer(container.getContainerID()).
+            getContainerID());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 6755e34..29238cf 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -91,7 +91,6 @@ public class TestMiniOzoneCluster {
     assertEquals(numberOfNodes, datanodes.size());
     for(HddsDatanodeService dn : datanodes) {
       // Create a single member pipe line
-      String containerName = OzoneUtils.getRequestID();
       DatanodeDetails datanodeDetails = dn.getDatanodeDetails();
       final PipelineChannel pipelineChannel =
           new PipelineChannel(datanodeDetails.getUuidString(),
@@ -99,7 +98,7 @@ public class TestMiniOzoneCluster {
               HddsProtos.ReplicationType.STAND_ALONE,
               HddsProtos.ReplicationFactor.ONE, "test");
       pipelineChannel.addMember(datanodeDetails);
-      Pipeline pipeline = new Pipeline(containerName, pipelineChannel);
+      Pipeline pipeline = new Pipeline(pipelineChannel);
 
       // Verify client is able to connect to the container
       try (XceiverClient client = new XceiverClient(pipeline, conf)){

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 5a5a08b..78ea5e1 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -20,10 +20,13 @@ package org.apache.hadoop.ozone;
 import static org.junit.Assert.fail;
 import java.io.IOException;
 
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -38,7 +41,6 @@ import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
 import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.junit.Rule;
 import org.junit.Assert;
@@ -113,7 +115,8 @@ public class TestStorageContainerManager {
           .thenReturn(fakeUser);
 
       try {
-        mockScm.getClientProtocolServer().deleteContainer("container1");
+        mockScm.getClientProtocolServer().deleteContainer(
+            ContainerTestHelper.getTestContainerID());
         fail("Operation should fail, expecting an IOException here.");
       } catch (Exception e) {
         if (expectPermissionDenied) {
@@ -127,35 +130,34 @@ public class TestStorageContainerManager {
       }
 
       try {
-        Pipeline pipeLine2 = mockScm.getClientProtocolServer()
+        ContainerInfo container2 = mockScm.getClientProtocolServer()
             .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, "container2", "OZONE");
+            HddsProtos.ReplicationFactor.ONE,  "OZONE");
         if (expectPermissionDenied) {
           fail("Operation should fail, expecting an IOException here.");
         } else {
-          Assert.assertEquals("container2", pipeLine2.getContainerName());
+          Assert.assertEquals(1, container2.getPipeline().getMachines().size());
         }
       } catch (Exception e) {
         verifyPermissionDeniedException(e, fakeUser);
       }
 
       try {
-        Pipeline pipeLine3 = mockScm.getClientProtocolServer()
+        ContainerInfo container3 = mockScm.getClientProtocolServer()
             .allocateContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, "container3", "OZONE");
-
+            HddsProtos.ReplicationFactor.ONE, "OZONE");
         if (expectPermissionDenied) {
           fail("Operation should fail, expecting an IOException here.");
         } else {
-          Assert.assertEquals("container3", pipeLine3.getContainerName());
-          Assert.assertEquals(1, pipeLine3.getMachines().size());
+          Assert.assertEquals(1, container3.getPipeline().getMachines().size());
         }
       } catch (Exception e) {
         verifyPermissionDeniedException(e, fakeUser);
       }
 
       try {
-        mockScm.getClientProtocolServer().getContainer("container4");
+        mockScm.getClientProtocolServer().getContainer(
+            ContainerTestHelper.getTestContainerID());
         fail("Operation should fail, expecting an IOException here.");
       } catch (Exception e) {
         if (expectPermissionDenied) {
@@ -210,9 +212,9 @@ public class TestStorageContainerManager {
           new TestStorageContainerManagerHelper(cluster, conf);
       Map<String, KsmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
 
-      Map<String, List<String>> containerBlocks = createDeleteTXLog(delLog,
+      Map<Long, List<Long>> containerBlocks = createDeleteTXLog(delLog,
           keyLocations, helper);
-      Set<String> containerNames = containerBlocks.keySet();
+      Set<Long> containerIDs = containerBlocks.keySet();
 
       // Verify a few TX gets created in the TX log.
       Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);
@@ -229,16 +231,16 @@ public class TestStorageContainerManager {
           return false;
         }
       }, 1000, 10000);
-      Assert.assertTrue(helper.getAllBlocks(containerNames).isEmpty());
+      Assert.assertTrue(helper.getAllBlocks(containerIDs).isEmpty());
 
       // Continue the work, add some TXs that with known container names,
       // but unknown block IDs.
-      for (String containerName : containerBlocks.keySet()) {
+      for (Long containerID : containerBlocks.keySet()) {
         // Add 2 TXs per container.
-        delLog.addTransaction(containerName,
-            Collections.singletonList(RandomStringUtils.randomAlphabetic(5)));
-        delLog.addTransaction(containerName,
-            Collections.singletonList(RandomStringUtils.randomAlphabetic(5)));
+        delLog.addTransaction(containerID,
+            Collections.singletonList(RandomUtils.nextLong()));
+        delLog.addTransaction(containerID,
+            Collections.singletonList(RandomUtils.nextLong()));
       }
 
       // Verify a few TX gets created in the TX log.
@@ -319,16 +321,16 @@ public class TestStorageContainerManager {
     }, 500, 10000);
   }
 
-  private Map<String, List<String>> createDeleteTXLog(DeletedBlockLog delLog,
+  private Map<Long, List<Long>> createDeleteTXLog(DeletedBlockLog delLog,
       Map<String, KsmKeyInfo> keyLocations,
       TestStorageContainerManagerHelper helper) throws IOException {
     // These keys will be written into a bunch of containers,
     // gets a set of container names, verify container containerBlocks
     // on datanodes.
-    Set<String> containerNames = new HashSet<>();
+    Set<Long> containerNames = new HashSet<>();
     for (Map.Entry<String, KsmKeyInfo> entry : keyLocations.entrySet()) {
       entry.getValue().getLatestVersionLocations().getLocationList()
-          .forEach(loc -> containerNames.add(loc.getContainerName()));
+          .forEach(loc -> containerNames.add(loc.getContainerID()));
     }
 
     // Total number of containerBlocks of these containers should be equal to
@@ -342,22 +344,22 @@ public class TestStorageContainerManager {
         helper.getAllBlocks(containerNames).size());
 
     // Create a deletion TX for each key.
-    Map<String, List<String>> containerBlocks = Maps.newHashMap();
+    Map<Long, List<Long>> containerBlocks = Maps.newHashMap();
     for (KsmKeyInfo info : keyLocations.values()) {
       List<KsmKeyLocationInfo> list =
           info.getLatestVersionLocations().getLocationList();
       list.forEach(location -> {
-        if (containerBlocks.containsKey(location.getContainerName())) {
-          containerBlocks.get(location.getContainerName())
-              .add(location.getBlockID());
+        if (containerBlocks.containsKey(location.getContainerID())) {
+          containerBlocks.get(location.getContainerID())
+              .add(location.getBlockID().getLocalID());
         } else {
-          List<String> blks = Lists.newArrayList();
-          blks.add(location.getBlockID());
-          containerBlocks.put(location.getContainerName(), blks);
+          List<Long> blks = Lists.newArrayList();
+          blks.add(location.getBlockID().getLocalID());
+          containerBlocks.put(location.getContainerID(), blks);
         }
       });
     }
-    for (Map.Entry<String, List<String>> tx : containerBlocks.entrySet()) {
+    for (Map.Entry<Long, List<Long>> tx : containerBlocks.entrySet()) {
       delLog.addTransaction(tx.getKey(), tx.getValue());
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 9917018..da87d7a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -22,6 +22,7 @@ import com.google.common.collect.Sets;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -36,7 +37,6 @@ import org.apache.hadoop.ozone.web.handlers.UserArgs;
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
 import org.apache.hadoop.utils.MetadataStore;
@@ -114,10 +114,10 @@ public class TestStorageContainerManagerHelper {
     return keyLocationMap;
   }
 
-  public List<String> getPendingDeletionBlocks(String containerName)
+  public List<String> getPendingDeletionBlocks(Long containerID)
       throws IOException {
     List<String> pendingDeletionBlocks = Lists.newArrayList();
-    MetadataStore meta = getContainerMetadata(containerName);
+    MetadataStore meta = getContainerMetadata(containerID);
     KeyPrefixFilter filter =
         new KeyPrefixFilter(OzoneConsts.DELETING_KEY_PREFIX);
     List<Map.Entry<byte[], byte[]>> kvs = meta
@@ -130,18 +130,18 @@ public class TestStorageContainerManagerHelper {
     return pendingDeletionBlocks;
   }
 
-  public List<String> getAllBlocks(Set<String> containerNames)
+  public List<Long> getAllBlocks(Set<Long> containerIDs)
       throws IOException {
-    List<String> allBlocks = Lists.newArrayList();
-    for (String containerName : containerNames) {
-      allBlocks.addAll(getAllBlocks(containerName));
+    List<Long> allBlocks = Lists.newArrayList();
+    for (Long containerID : containerIDs) {
+      allBlocks.addAll(getAllBlocks(containerID));
     }
     return allBlocks;
   }
 
-  public List<String> getAllBlocks(String containerName) throws IOException {
-    List<String> allBlocks = Lists.newArrayList();
-    MetadataStore meta = getContainerMetadata(containerName);
+  public List<Long> getAllBlocks(Long containerID) throws IOException {
+    List<Long> allBlocks = Lists.newArrayList();
+    MetadataStore meta = getContainerMetadata(containerID);
     MetadataKeyFilter filter =
         (preKey, currentKey, nextKey) -> !DFSUtil.bytes2String(currentKey)
             .startsWith(OzoneConsts.DELETING_KEY_PREFIX);
@@ -149,20 +149,21 @@ public class TestStorageContainerManagerHelper {
         meta.getRangeKVs(null, Integer.MAX_VALUE, filter);
     kvs.forEach(entry -> {
       String key = DFSUtil.bytes2String(entry.getKey());
-      allBlocks.add(key.replace(OzoneConsts.DELETING_KEY_PREFIX, ""));
+      key = key.replace(OzoneConsts.DELETING_KEY_PREFIX, "");
+      allBlocks.add(Long.parseLong(key));
     });
     return allBlocks;
   }
 
-  private MetadataStore getContainerMetadata(String containerName)
+  private MetadataStore getContainerMetadata(Long containerID)
       throws IOException {
-    Pipeline pipeline = cluster.getStorageContainerManager()
-        .getClientProtocolServer().getContainer(containerName);
-    DatanodeDetails leadDN = pipeline.getLeader();
+    ContainerInfo container = cluster.getStorageContainerManager()
+        .getClientProtocolServer().getContainer(containerID);
+    DatanodeDetails leadDN = container.getPipeline().getLeader();
     OzoneContainer containerServer =
         getContainerServerByDatanodeUuid(leadDN.getUuidString());
     ContainerData containerData = containerServer.getContainerManager()
-        .readContainer(containerName);
+        .readContainer(containerID);
     return KeyUtils.getDB(containerData, conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index 32a70a2..c9a25e5 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.client.rpc;
 
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -45,7 +46,6 @@ import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.
     StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.util.Time;
@@ -388,10 +388,10 @@ public class TestOzoneRpcClient {
     KsmKeyInfo keyInfo = keySpaceManager.lookupKey(keyArgs);
     for (KsmKeyLocationInfo info:
         keyInfo.getLatestVersionLocations().getLocationList()) {
-      Pipeline pipeline =
-          storageContainerLocationClient.getContainer(info.getContainerName());
-      if ((pipeline.getFactor() != replicationFactor) ||
-          (pipeline.getType() != replicationType)) {
+      ContainerInfo container =
+          storageContainerLocationClient.getContainer(info.getContainerID());
+      if ((container.getPipeline().getFactor() != replicationFactor) ||
+          (container.getPipeline().getType() != replicationType)) {
         return false;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index acab0b2..bcd08d7 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -21,12 +21,14 @@ package org.apache.hadoop.ozone.container;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import org.apache.commons.codec.binary.Hex;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -39,6 +41,7 @@ import 
org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -79,9 +82,9 @@ public final class ContainerTestHelper {
    * @return Pipeline with single node in it.
    * @throws IOException
    */
-  public static Pipeline createSingleNodePipeline(String containerName) throws
+  public static Pipeline createSingleNodePipeline() throws
       IOException {
-    return createPipeline(containerName, 1);
+    return createPipeline(1);
   }
 
   public static String createLocalAddress() throws IOException {
@@ -111,19 +114,18 @@ public final class ContainerTestHelper {
    * @return Pipeline with single node in it.
    * @throws IOException
    */
-  public static Pipeline createPipeline(String containerName, int numNodes)
+  public static Pipeline createPipeline(int numNodes)
       throws IOException {
     Preconditions.checkArgument(numNodes >= 1);
     final List<DatanodeDetails> ids = new ArrayList<>(numNodes);
     for(int i = 0; i < numNodes; i++) {
       ids.add(createDatanodeDetails());
     }
-    return createPipeline(containerName, ids);
+    return createPipeline(ids);
   }
 
   public static Pipeline createPipeline(
-      String containerName, Iterable<DatanodeDetails> ids)
-      throws IOException {
+      Iterable<DatanodeDetails> ids) throws IOException {
     Objects.requireNonNull(ids, "ids == null");
     final Iterator<DatanodeDetails> i = ids.iterator();
     Preconditions.checkArgument(i.hasNext());
@@ -136,21 +138,21 @@ public final class ContainerTestHelper {
     for(; i.hasNext();) {
       pipelineChannel.addMember(i.next());
     }
-    return new Pipeline(containerName, pipelineChannel);
+    return new Pipeline(pipelineChannel);
   }
 
   /**
    * Creates a ChunkInfo for testing.
    *
-   * @param keyName - Name of the key
+   * @param keyID - ID of the key
    * @param seqNo - Chunk number.
    * @return ChunkInfo
    * @throws IOException
    */
-  public static ChunkInfo getChunk(String keyName, int seqNo, long offset,
+  public static ChunkInfo getChunk(long keyID, int seqNo, long offset,
       long len) throws IOException {
 
-    ChunkInfo info = new ChunkInfo(String.format("%s.data.%d", keyName,
+    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", keyID,
         seqNo), offset, len);
     return info;
   }
@@ -185,29 +187,27 @@ public final class ContainerTestHelper {
    * Returns a writeChunk Request.
    *
    * @param pipeline - A set of machines where this container lives.
-   * @param containerName - Name of the container.
-   * @param keyName - Name of the Key this chunk is part of.
+   * @param blockID - Block ID of the chunk.
    * @param datalen - Length of data.
    * @return ContainerCommandRequestProto
    * @throws IOException
    * @throws NoSuchAlgorithmException
    */
   public static ContainerCommandRequestProto getWriteChunkRequest(
-      Pipeline pipeline, String containerName, String keyName, int datalen)
+      Pipeline pipeline, BlockID blockID, int datalen)
       throws IOException, NoSuchAlgorithmException {
-    LOG.trace("writeChunk {} (key={}) to pipeline=",
-        datalen, keyName, pipeline);
+    LOG.trace("writeChunk {} (blockID={}) to pipeline=",
+        datalen, blockID, pipeline);
     ContainerProtos.WriteChunkRequestProto.Builder writeRequest =
         ContainerProtos.WriteChunkRequestProto
             .newBuilder();
 
     Pipeline newPipeline =
-        new Pipeline(containerName, pipeline.getPipelineChannel());
-    writeRequest.setPipeline(newPipeline.getProtobufMessage());
-    writeRequest.setKeyName(keyName);
+        new Pipeline(pipeline.getPipelineChannel());
+    writeRequest.setBlockID(blockID.getProtobuf());
 
     byte[] data = getData(datalen);
-    ChunkInfo info = getChunk(keyName, 0, 0, datalen);
+    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
     setDataChecksum(info, data);
 
     writeRequest.setChunkData(info.getProtoBufMessage());
@@ -227,29 +227,26 @@ public final class ContainerTestHelper {
    * Returns PutSmallFile Request that we can send to the container.
    *
    * @param pipeline - Pipeline
-   * @param containerName - ContainerName.
-   * @param keyName - KeyName
+   * @param blockID - Block ID of the small file.
    * @param dataLen - Number of bytes in the data
    * @return ContainerCommandRequestProto
    */
   public static ContainerCommandRequestProto getWriteSmallFileRequest(
-      Pipeline pipeline, String containerName, String keyName, int dataLen)
+      Pipeline pipeline, BlockID blockID, int dataLen)
       throws Exception {
     ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest =
         ContainerProtos.PutSmallFileRequestProto.newBuilder();
     Pipeline newPipeline =
-        new Pipeline(containerName, pipeline.getPipelineChannel());
+        new Pipeline(pipeline.getPipelineChannel());
     byte[] data = getData(dataLen);
-    ChunkInfo info = getChunk(keyName, 0, 0, dataLen);
+    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen);
     setDataChecksum(info, data);
 
 
     ContainerProtos.PutKeyRequestProto.Builder putRequest =
         ContainerProtos.PutKeyRequestProto.newBuilder();
 
-    putRequest.setPipeline(newPipeline.getProtobufMessage());
-    KeyData keyData = new KeyData(containerName, keyName);
-
+    KeyData keyData = new KeyData(blockID);
     List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
     newList.add(info.getProtoBufMessage());
     keyData.setChunks(newList);
@@ -270,12 +267,11 @@ public final class ContainerTestHelper {
 
 
   public static ContainerCommandRequestProto getReadSmallFileRequest(
-      ContainerProtos.PutKeyRequestProto putKey)
+      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKey)
       throws Exception {
     ContainerProtos.GetSmallFileRequestProto.Builder smallFileRequest =
         ContainerProtos.GetSmallFileRequestProto.newBuilder();
-    Pipeline pipeline = Pipeline.getFromProtoBuf(putKey.getPipeline());
-    ContainerCommandRequestProto getKey = getKeyRequest(putKey);
+    ContainerCommandRequestProto getKey = getKeyRequest(pipeline, putKey);
     smallFileRequest.setKey(getKey.getGetKey());
 
     ContainerCommandRequestProto.Builder request =
@@ -290,23 +286,21 @@ public final class ContainerTestHelper {
   /**
    * Returns a read Request.
    *
+   * @param pipeline pipeline.
    * @param request writeChunkRequest.
    * @return Request.
    * @throws IOException
    * @throws NoSuchAlgorithmException
    */
   public static ContainerCommandRequestProto getReadChunkRequest(
-      ContainerProtos.WriteChunkRequestProto request)
+      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto request)
       throws IOException, NoSuchAlgorithmException {
-    LOG.trace("readChunk key={} from pipeline={}",
-        request.getKeyName(), request.getPipeline());
+    LOG.trace("readChunk blockID={} from pipeline={}",
+        request.getBlockID(), pipeline);
 
     ContainerProtos.ReadChunkRequestProto.Builder readRequest =
         ContainerProtos.ReadChunkRequestProto.newBuilder();
-    Pipeline pipeline = Pipeline.getFromProtoBuf(request.getPipeline());
-    readRequest.setPipeline(request.getPipeline());
-
-    readRequest.setKeyName(request.getKeyName());
+    readRequest.setBlockID(request.getBlockID());
     readRequest.setChunkData(request.getChunkData());
 
     ContainerCommandRequestProto.Builder newRequest =
@@ -321,25 +315,25 @@ public final class ContainerTestHelper {
   /**
    * Returns a delete Request.
    *
+   * @param pipeline pipeline.
    * @param writeRequest - write request
    * @return request
    * @throws IOException
    * @throws NoSuchAlgorithmException
    */
   public static ContainerCommandRequestProto getDeleteChunkRequest(
-      ContainerProtos.WriteChunkRequestProto writeRequest)
+      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest)
       throws
       IOException, NoSuchAlgorithmException {
-    LOG.trace("deleteChunk key={} from pipeline={}",
-        writeRequest.getKeyName(), writeRequest.getPipeline());
-    Pipeline pipeline = Pipeline.getFromProtoBuf(writeRequest.getPipeline());
+    LOG.trace("deleteChunk blockID={} from pipeline={}",
+        writeRequest.getBlockID(), pipeline);
+
     ContainerProtos.DeleteChunkRequestProto.Builder deleteRequest =
         ContainerProtos.DeleteChunkRequestProto
             .newBuilder();
 
-    deleteRequest.setPipeline(writeRequest.getPipeline());
     deleteRequest.setChunkData(writeRequest.getChunkData());
-    deleteRequest.setKeyName(writeRequest.getKeyName());
+    deleteRequest.setBlockID(writeRequest.getBlockID());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -357,19 +351,17 @@ public final class ContainerTestHelper {
    * @return ContainerCommandRequestProto.
    */
   public static ContainerCommandRequestProto getCreateContainerRequest(
-      String containerName, Pipeline pipeline) throws IOException {
-    LOG.trace("addContainer: {}", containerName);
+      long containerID, Pipeline pipeline) throws IOException {
+    LOG.trace("addContainer: {}", containerID);
 
     ContainerProtos.CreateContainerRequestProto.Builder createRequest =
         ContainerProtos.CreateContainerRequestProto
             .newBuilder();
     ContainerProtos.ContainerData.Builder containerData = ContainerProtos
         .ContainerData.newBuilder();
-    containerData.setName(containerName);
-    createRequest.setPipeline(
-        ContainerTestHelper.createSingleNodePipeline(containerName)
-            .getProtobufMessage());
+    containerData.setContainerID(containerID);
     createRequest.setContainerData(containerData.build());
+    createRequest.setPipeline(pipeline.getProtobufMessage());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -386,18 +378,18 @@ public final class ContainerTestHelper {
    * Creates a container data based on the given meta data,
    * and request to update an existing container with it.
    *
-   * @param containerName
+   * @param containerID
    * @param metaData
    * @return
    * @throws IOException
    */
   public static ContainerCommandRequestProto getUpdateContainerRequest(
-      String containerName, Map<String, String> metaData) throws IOException {
+      long containerID, Map<String, String> metaData) throws IOException {
     ContainerProtos.UpdateContainerRequestProto.Builder updateRequestBuilder =
         ContainerProtos.UpdateContainerRequestProto.newBuilder();
     ContainerProtos.ContainerData.Builder containerData = ContainerProtos
         .ContainerData.newBuilder();
-    containerData.setName(containerName);
+    containerData.setContainerID(containerID);
     String[] keys = metaData.keySet().toArray(new String[]{});
     for(int i=0; i<keys.length; i++) {
       KeyValue.Builder kvBuilder = KeyValue.newBuilder();
@@ -406,7 +398,7 @@ public final class ContainerTestHelper {
       containerData.addMetadata(i, kvBuilder.build());
     }
     Pipeline pipeline =
-        ContainerTestHelper.createSingleNodePipeline(containerName);
+        ContainerTestHelper.createSingleNodePipeline();
     updateRequestBuilder.setPipeline(pipeline.getProtobufMessage());
     updateRequestBuilder.setContainerData(containerData.build());
 
@@ -440,22 +432,20 @@ public final class ContainerTestHelper {
 
   /**
    * Returns the PutKeyRequest for test purpose.
-   *
+   * @param pipeline - pipeline.
    * @param writeRequest - Write Chunk Request.
    * @return - Request
    */
   public static ContainerCommandRequestProto getPutKeyRequest(
-      ContainerProtos.WriteChunkRequestProto writeRequest) {
+      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest) {
-    LOG.trace("putKey: {} to pipeline={}",
-        writeRequest.getKeyName(), writeRequest.getPipeline());
+    LOG.trace("putKey: blockID={} to pipeline={}",
+        writeRequest.getBlockID(), pipeline);
 
-    Pipeline pipeline = Pipeline.getFromProtoBuf(writeRequest.getPipeline());
     ContainerProtos.PutKeyRequestProto.Builder putRequest =
         ContainerProtos.PutKeyRequestProto.newBuilder();
 
-    putRequest.setPipeline(writeRequest.getPipeline());
-    KeyData keyData = new KeyData(writeRequest.getPipeline().getContainerName(),
-        writeRequest.getKeyName());
+    KeyData keyData = new KeyData(
+        BlockID.getFromProtobuf(writeRequest.getBlockID()));
     List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
     newList.add(writeRequest.getChunkData());
     keyData.setChunks(newList);
@@ -472,24 +462,22 @@ public final class ContainerTestHelper {
 
   /**
    * Gets a GetKeyRequest for test purpose.
-   *
+   * @param  pipeline - pipeline
    * @param putKeyRequest - putKeyRequest.
    * @return - Request
+   * immediately.
    */
   public static ContainerCommandRequestProto getKeyRequest(
-      ContainerProtos.PutKeyRequestProto putKeyRequest) {
-    LOG.trace("getKey: name={} from pipeline={}",
-        putKeyRequest.getKeyData().getName(), putKeyRequest.getPipeline());
-    Pipeline pipeline = Pipeline.getFromProtoBuf(putKeyRequest.getPipeline());
+      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
+    HddsProtos.BlockID blockID = putKeyRequest.getKeyData().getBlockID();
+    LOG.trace("getKey: blockID={}", blockID);
 
     ContainerProtos.GetKeyRequestProto.Builder getRequest =
         ContainerProtos.GetKeyRequestProto.newBuilder();
     ContainerProtos.KeyData.Builder keyData = ContainerProtos.KeyData
         .newBuilder();
-    keyData.setContainerName(putKeyRequest.getPipeline().getContainerName());
-    keyData.setName(putKeyRequest.getKeyData().getName());
+    keyData.setBlockID(blockID);
     getRequest.setKeyData(keyData);
-    getRequest.setPipeline(putKeyRequest.getPipeline());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -517,18 +505,17 @@ public final class ContainerTestHelper {
   }
 
   /**
+   * @param pipeline - pipeline.
    * @param putKeyRequest - putKeyRequest.
    * @return - Request
    */
   public static ContainerCommandRequestProto getDeleteKeyRequest(
-      ContainerProtos.PutKeyRequestProto putKeyRequest) {
-    LOG.trace("deleteKey: name={} from pipeline={}",
-        putKeyRequest.getKeyData().getName(), putKeyRequest.getPipeline());
-    Pipeline pipeline = Pipeline.getFromProtoBuf(putKeyRequest.getPipeline());
+      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
+    LOG.trace("deleteKey: name={}",
+        putKeyRequest.getKeyData().getBlockID());
     ContainerProtos.DeleteKeyRequestProto.Builder delRequest =
         ContainerProtos.DeleteKeyRequestProto.newBuilder();
-    delRequest.setPipeline(putKeyRequest.getPipeline());
-    delRequest.setName(putKeyRequest.getKeyData().getName());
+    delRequest.setBlockID(putKeyRequest.getKeyData().getBlockID());
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.DeleteKey);
@@ -541,14 +528,14 @@ public final class ContainerTestHelper {
   /**
    * Returns a close container request.
    * @param pipeline - pipeline
+   * @param containerID - ID of the container.
    * @return ContainerCommandRequestProto.
    */
   public static ContainerCommandRequestProto getCloseContainer(
-      Pipeline pipeline) {
-    Preconditions.checkNotNull(pipeline);
+      Pipeline pipeline, long containerID) {
     ContainerProtos.CloseContainerRequestProto closeRequest =
-        ContainerProtos.CloseContainerRequestProto.newBuilder().setPipeline(
-            pipeline.getProtobufMessage()).build();
+        ContainerProtos.CloseContainerRequestProto.newBuilder().
+            setContainerID(containerID).build();
     ContainerProtos.ContainerCommandRequestProto cmd =
         ContainerCommandRequestProto.newBuilder().setCmdType(ContainerProtos
             .Type.CloseContainer).setCloseContainer(closeRequest)
@@ -562,19 +549,19 @@ public final class ContainerTestHelper {
   /**
    * Returns a simple request without traceId.
    * @param pipeline - pipeline
+   * @param containerID - ID of the container.
    * @return ContainerCommandRequestProto without traceId.
    */
   public static ContainerCommandRequestProto getRequestWithoutTraceId(
-          Pipeline pipeline) {
+      Pipeline pipeline, long containerID) {
     Preconditions.checkNotNull(pipeline);
     ContainerProtos.CloseContainerRequestProto closeRequest =
-            
ContainerProtos.CloseContainerRequestProto.newBuilder().setPipeline(
-                    pipeline.getProtobufMessage()).build();
+            ContainerProtos.CloseContainerRequestProto.newBuilder().
+                setContainerID(containerID).build();
     ContainerProtos.ContainerCommandRequestProto cmd =
             
ContainerCommandRequestProto.newBuilder().setCmdType(ContainerProtos
                     .Type.CloseContainer).setCloseContainer(closeRequest)
-                    .setDatanodeUuid(
-                        pipeline.getLeader().getUuidString())
+                    .setDatanodeUuid(pipeline.getLeader().getUuidString())
                     .build();
     return cmd;
   }
@@ -585,12 +572,12 @@ public final class ContainerTestHelper {
    * @return ContainerCommandRequestProto.
    */
   public static ContainerCommandRequestProto getDeleteContainer(
-      Pipeline pipeline, boolean forceDelete) {
+      Pipeline pipeline, long containerID, boolean forceDelete) {
     Preconditions.checkNotNull(pipeline);
     ContainerProtos.DeleteContainerRequestProto deleteRequest =
-        ContainerProtos.DeleteContainerRequestProto.newBuilder().setName(
-            pipeline.getContainerName()).setPipeline(
-            pipeline.getProtobufMessage()).setForceDelete(forceDelete).build();
+        ContainerProtos.DeleteContainerRequestProto.newBuilder().
+            setContainerID(containerID).
+            setForceDelete(forceDelete).build();
     return ContainerCommandRequestProto.newBuilder()
         .setCmdType(ContainerProtos.Type.DeleteContainer)
         .setDeleteContainer(deleteRequest)
@@ -598,4 +585,23 @@ public final class ContainerTestHelper {
         .setDatanodeUuid(pipeline.getLeader().getUuidString())
         .build();
   }
+
+  private static void sleep(long milliseconds) {
+    try {
+      Thread.sleep(milliseconds);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+    }
+  }
+
+  public static BlockID getTestBlockID(long containerID) {
+    // Add 2ms delay so that localID based on UtcTime
+    // won't collide.
+    sleep(2);
+    return new BlockID(containerID, Time.getUtcTime());
+  }
+
+  public static long getTestContainerID() {
+    return Time.getUtcTime();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 0f8c457..a60da21 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.container.common;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
@@ -27,6 +28,7 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import 
org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
@@ -35,7 +37,6 @@ import 
org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
 import 
org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import 
org.apache.hadoop.ozone.container.common.statemachine.background.BlockDeletingService;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
@@ -128,19 +129,21 @@ public class TestBlockDeletingService {
       Configuration conf, int numOfContainers, int numOfBlocksPerContainer,
       int numOfChunksPerBlock, File chunkDir) throws IOException {
     for (int x = 0; x < numOfContainers; x++) {
-      String containerName = OzoneUtils.getRequestID();
-      ContainerData data = new ContainerData(containerName, new Long(x), conf);
-      mgr.createContainer(createSingleNodePipeline(containerName), data);
-      data = mgr.readContainer(containerName);
+      long containerID = ContainerTestHelper.getTestContainerID();
+      ContainerData data = new ContainerData(containerID, conf);
+      mgr.createContainer(data);
+      data = mgr.readContainer(containerID);
       MetadataStore metadata = KeyUtils.getDB(data, conf);
       for (int j = 0; j<numOfBlocksPerContainer; j++) {
-        String blockName = containerName + "b" + j;
-        String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX + blockName;
-        KeyData kd = new KeyData(containerName, deleteStateName);
+        BlockID blockID =
+            ContainerTestHelper.getTestBlockID(containerID);
+        String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX +
+            blockID.getLocalID();
+        KeyData kd = new KeyData(blockID);
         List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
         for (int k = 0; k<numOfChunksPerBlock; k++) {
           // offset doesn't matter here
-          String chunkName = blockName + "_chunk_" + k;
+          String chunkName = blockID.getLocalID() + "_chunk_" + k;
           File chunk = new File(chunkDir, chunkName);
           FileUtils.writeStringToFile(chunk, "a chunk",
               Charset.defaultCharset());
@@ -200,7 +203,7 @@ public class TestBlockDeletingService {
 
     // Ensure 1 container was created
     List<ContainerData> containerData = Lists.newArrayList();
-    containerManager.listContainer(null, 1, "", containerData);
+    containerManager.listContainer(0L, 1, containerData);
     Assert.assertEquals(1, containerData.size());
     MetadataStore meta = KeyUtils.getDB(containerData.get(0), conf);
 
@@ -286,7 +289,7 @@ public class TestBlockDeletingService {
 
     // get container meta data
     List<ContainerData> containerData = Lists.newArrayList();
-    containerManager.listContainer(null, 1, "", containerData);
+    containerManager.listContainer(0L, 1, containerData);
     MetadataStore meta = KeyUtils.getDB(containerData.get(0), conf);
 
     LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
----------------------------------------------------------------------
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
index 893f2f6..331db40 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java
@@ -93,12 +93,10 @@ public class TestContainerDeletionChoosingPolicy {
 
     int numContainers = 10;
     for (int i = 0; i < numContainers; i++) {
-      String containerName = OzoneUtils.getRequestID();
-      ContainerData data = new ContainerData(containerName, new Long(i), conf);
-      containerManager.createContainer(createSingleNodePipeline(containerName),
-          data);
+      ContainerData data = new ContainerData(new Long(i), conf);
+      containerManager.createContainer(data);
       Assert.assertTrue(
-          containerManager.getContainerMap().containsKey(containerName));
+          
containerManager.getContainerMap().containsKey(data.getContainerID()));
     }
 
     List<ContainerData> result0 = containerManager
@@ -113,8 +111,8 @@ public class TestContainerDeletionChoosingPolicy {
 
     boolean hasShuffled = false;
     for (int i = 0; i < numContainers; i++) {
-      if (!result1.get(i).getContainerName()
-          .equals(result2.get(i).getContainerName())) {
+      if (result1.get(i).getContainerID()
+           != result2.get(i).getContainerID()) {
         hasShuffled = true;
         break;
       }
@@ -144,9 +142,8 @@ public class TestContainerDeletionChoosingPolicy {
     // create [numContainers + 1] containers
     for (int i = 0; i <= numContainers; i++) {
       String containerName = OzoneUtils.getRequestID();
-      ContainerData data = new ContainerData(containerName, new Long(i), conf);
-      containerManager.createContainer(createSingleNodePipeline(containerName),
-          data);
+      ContainerData data = new ContainerData(new Long(i), conf);
+      containerManager.createContainer(data);
       Assert.assertTrue(
           containerManager.getContainerMap().containsKey(containerName));
 
@@ -186,7 +183,7 @@ public class TestContainerDeletionChoosingPolicy {
     // verify the order of return list
     int lastCount = Integer.MAX_VALUE;
     for (ContainerData data : result1) {
-      int currentCount = name2Count.remove(data.getContainerName());
+      int currentCount = name2Count.remove(data.getContainerID());
       // previous count should not smaller than next one
       Assert.assertTrue(currentCount > 0 && currentCount <= lastCount);
       lastCount = currentCount;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to