http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
index c949c6c..61dee2b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
@@ -31,62 +31,57 @@ import java.util.List;
  */
 public interface Mapping extends Closeable {
   /**
-   * Returns the ContainerInfo from the container name.
+   * Returns the ContainerInfo from the container ID.
    *
-   * @param containerName - Name
+   * @param containerID - ID of container.
    * @return - ContainerInfo such as creation state and the pipeline.
    * @throws IOException
    */
-  ContainerInfo getContainer(String containerName) throws IOException;
+  ContainerInfo getContainer(long containerID) throws IOException;
 
   /**
    * Returns containers under certain conditions.
-   * Search container names from start name(exclusive),
-   * and use prefix name to filter the result. The max
-   * size of the searching range cannot exceed the
+   * Search container IDs from the start ID (exclusive).
+   * The max size of the searching range cannot exceed the
    * value of count.
    *
-   * @param startName start name, if null, start searching at the head.
-   * @param prefixName prefix name, if null, then filter is disabled.
-   * @param count count, if count < 0, the max size is unlimited.(
+   * @param startContainerID start containerID, >= 0, start searching at the head if 0.
+   * @param count count must be >= 0
   *              Usually the count will be replaced with a very big
-   *              value instead of being unlimited in case the db is very big)
+   *              value instead of being unlimited in case the db is very big.
    *
    * @return a list of container.
    * @throws IOException
    */
-  List<ContainerInfo> listContainer(String startName, String prefixName,
-      int count) throws IOException;
+  List<ContainerInfo> listContainer(long startContainerID, int count)
+      throws IOException;
 
   /**
    * Allocates a new container for a given keyName and replication factor.
    *
    * @param replicationFactor - replication factor of the container.
-   * @param containerName - Name.
    * @param owner
    * @return - Container Info.
    * @throws IOException
    */
   ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor replicationFactor,
-      String containerName, String owner) throws IOException;
+      HddsProtos.ReplicationFactor replicationFactor, String owner)
+      throws IOException;
 
   /**
    * Deletes a container from SCM.
    *
-   * @param containerName - Container Name
+   * @param containerID - Container ID
    * @throws IOException
    */
-  void deleteContainer(String containerName) throws IOException;
+  void deleteContainer(long containerID) throws IOException;
 
   /**
    * Update container state.
-   * @param containerName - Container Name
+   * @param containerID - Container ID
    * @param event - container life cycle event
    * @return - new container state
    * @throws IOException
    */
-  HddsProtos.LifeCycleState updateContainerState(String containerName,
+  HddsProtos.LifeCycleState updateContainerState(long containerID,
       HddsProtos.LifeCycleEvent event) throws IOException;
 
   /**

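For context, a minimal sketch of how a caller drives the new ID-based Mapping interface. The mapping instance and the sample ID are illustrative assumptions, not part of this patch:

// Hedged sketch: assumes an already-constructed Mapping implementation
// (e.g. ContainerMapping) and a known container ID.
long containerID = 42L; // hypothetical sample ID
ContainerInfo info = mapping.getContainer(containerID);

// Paging is now by ID rather than by name prefix: start at the head (0)
// and fetch at most 100 containers.
List<ContainerInfo> batch = mapping.listContainer(0, 100);

// State changes and deletion are likewise keyed by the long ID.
mapping.updateContainerState(containerID, HddsProtos.LifeCycleEvent.FINALIZE);
mapping.deleteContainer(containerID);
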
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
index b5d4da9..75ec8e1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
@@ -51,7 +51,7 @@ public class ContainerCloser {
   private static final long MULTIPLIER = 3L;
   private static final int CLEANUP_WATER_MARK = 1000;
   private final NodeManager nodeManager;
-  private final Map<String, Long> commandIssued;
+  private final Map<Long, Long> commandIssued;
   private final Configuration configuration;
   private final AtomicInteger mapCount;
   private final long reportInterval;
@@ -93,12 +93,12 @@ public class ContainerCloser {
    */
   public void close(HddsProtos.SCMContainerInfo info) {
 
-    if (commandIssued.containsKey(info.getContainerName())) {
+    if (commandIssued.containsKey(info.getContainerID())) {
       // We check if we issued a close command in last 3 * reportInterval secs.
-      long commandQueueTime = commandIssued.get(info.getContainerName());
+      long commandQueueTime = commandIssued.get(info.getContainerID());
       long currentTime = TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow());
       if (currentTime > commandQueueTime + (MULTIPLIER * reportInterval)) {
-        commandIssued.remove(info.getContainerName());
+        commandIssued.remove(info.getContainerID());
         mapCount.decrementAndGet();
       } else {
         // Ignore this request, since we just issued a close command. We
@@ -131,10 +131,10 @@ public class ContainerCloser {
         pipeline.getPipelineChannel().getMembersList()) {
       nodeManager.addDatanodeCommand(
           DatanodeDetails.getFromProtoBuf(datanodeDetails).getUuid(),
-          new CloseContainerCommand(info.getContainerName()));
+          new CloseContainerCommand(info.getContainerID()));
     }
-    if (!commandIssued.containsKey(info.getContainerName())) {
-      commandIssued.put(info.getContainerName(),
+    if (!commandIssued.containsKey(info.getContainerID())) {
+      commandIssued.put(info.getContainerID(),
           TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow()));
       mapCount.incrementAndGet();
     }
@@ -150,7 +150,7 @@ public class ContainerCloser {
       Runnable entryCleaner = () -> {
         LOG.debug("Starting close container Hash map cleaner.");
         try {
-          for (Map.Entry<String, Long> entry : commandIssued.entrySet()) {
+          for (Map.Entry<Long, Long> entry : commandIssued.entrySet()) {
             long commandQueueTime = entry.getValue();
             if (commandQueueTime + (MULTIPLIER * reportInterval) >
                 TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow())) {

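The suppression logic above reduces to a small pattern: remember, per container ID, when a close command was queued, and drop repeat requests inside a 3 x reportInterval window. A hedged sketch of that pattern against the now Long-keyed map, with hypothetical local names and omitting the mapCount bookkeeping:

// commandIssued maps containerID -> seconds when the close was queued.
long id = info.getContainerID();
long now = TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow());
Long queuedAt = commandIssued.get(id);
if (queuedAt != null && now <= queuedAt + MULTIPLIER * reportInterval) {
  return; // close was issued recently for this container; ignore request
}
commandIssued.put(id, now); // record the newly issued close command
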
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
index ddbd213..af878bf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java
@@ -61,7 +61,7 @@ public final class InProgressPool {
   private final NodeManager nodeManager;
   private final NodePoolManager poolManager;
   private final ExecutorService executorService;
-  private final Map<String, Integer> containerCountMap;
+  private final Map<Long, Integer> containerCountMap;
   private final Map<UUID, Boolean> processedNodeSet;
   private final long startTime;
   private ProgressStatus status;
@@ -258,12 +258,12 @@ public final class InProgressPool {
         for (ContainerInfo info : reports.getReportsList()) {
           containerProcessedCount.incrementAndGet();
           LOG.debug("Total Containers processed: {} Container Name: {}",
-              containerProcessedCount.get(), info.getContainerName());
+              containerProcessedCount.get(), info.getContainerID());
 
           // Update the container map with count + 1 if the key exists or
           // update the map with 1. Since this is a concurrentMap the
           // computation and update is atomic.
-          containerCountMap.merge(info.getContainerName(), 1, Integer::sum);
+          containerCountMap.merge(info.getContainerID(), 1, Integer::sum);
         }
       }
     };
@@ -275,8 +275,8 @@ public final class InProgressPool {
    * @param predicate -- Predicate to filter by
    * @return A list of map entries.
    */
-  public List<Map.Entry<String, Integer>> filterContainer(
-      Predicate<Map.Entry<String, Integer>> predicate) {
+  public List<Map.Entry<Long, Integer>> filterContainer(
+      Predicate<Map.Entry<Long, Integer>> predicate) {
     return containerCountMap.entrySet().stream()
         .filter(predicate).collect(Collectors.toList());
   }

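With the count map keyed by container ID, replication checks reduce to filtering entries by replica count. A hedged usage sketch (pool stands in for an InProgressPool instance):

// Find containers reported by fewer than three datanodes.
List<Map.Entry<Long, Integer>> underReplicated =
    pool.filterContainer(entry -> entry.getValue() < 3);
for (Map.Entry<Long, Integer> entry : underReplicated) {
  LOG.info("container {} has only {} replicas",
      entry.getKey(), entry.getValue());
}
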
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
index a4a6c51..faf330e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java
@@ -248,7 +248,7 @@ public final class SCMNodePoolManager implements NodePoolManager {
       throws SCMException {
     Preconditions.checkNotNull(datanodeDetails, "node is null");
     try {
-      byte[]  result = nodePoolStore.get(
+      byte[] result = nodePoolStore.get(
           datanodeDetails.getProtoBufMessage().toByteArray());
       return result == null ? null : DFSUtil.bytes2String(result);
     } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
index 8e43528..832fcc6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
@@ -50,11 +50,10 @@ public abstract class PipelineManager {
    * needed and based on the replication type in the request appropriate
    * Interface is invoked.
    *
-   * @param containerName Name of the container
    * @param replicationFactor - Replication Factor
    * @return a Pipeline.
    */
-  public synchronized final Pipeline getPipeline(String containerName,
+  public synchronized final Pipeline getPipeline(
       ReplicationFactor replicationFactor, ReplicationType replicationType)
       throws IOException {
     /**
@@ -74,15 +73,17 @@ public abstract class PipelineManager {
     PipelineChannel pipelineChannel =
         allocatePipelineChannel(replicationFactor);
     if (pipelineChannel != null) {
-      LOG.debug("created new pipelineChannel:{} for container:{}",
-          pipelineChannel.getName(), containerName);
+      LOG.debug("created new pipelineChannel:{} for container with " +
+              "replicationType:{} replicationFactor:{}",
+          pipelineChannel.getName(), replicationType, replicationFactor);
       activePipelineChannels.add(pipelineChannel);
     } else {
       pipelineChannel =
           findOpenPipelineChannel(replicationType, replicationFactor);
       if (pipelineChannel != null) {
-        LOG.debug("re-used pipelineChannel:{} for container:{}",
-            pipelineChannel.getName(), containerName);
+        LOG.debug("re-used pipelineChannel:{} for container with " +
+                "replicationType:{} replicationFactor:{}",
+            pipelineChannel.getName(), replicationType, replicationFactor);
       }
     }
     if (pipelineChannel == null) {
@@ -90,7 +91,7 @@ public abstract class PipelineManager {
               "free nodes or operational pipelineChannel.");
       return null;
     } else {
-      return new Pipeline(containerName, pipelineChannel);
+      return new Pipeline(pipelineChannel);
     }
   }
 

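Since a pipeline is no longer bound to a container name at creation time, callers request one purely by replication settings. A hedged sketch (manager stands in for any concrete PipelineManager):

Pipeline pipeline = manager.getPipeline(
    ReplicationFactor.THREE, ReplicationType.RATIS);
if (pipeline == null) {
  // No free nodes and no operational pipeline channel were available.
}
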
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index f0c9eea..d29bb84 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -166,14 +166,14 @@ public class PipelineSelector {
    */
 
   public Pipeline getReplicationPipeline(ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor, String containerName)
+      HddsProtos.ReplicationFactor replicationFactor)
       throws IOException {
     PipelineManager manager = getPipelineManager(replicationType);
     Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Getting replication pipeline for {} : Replication {}",
-        containerName, replicationFactor.toString());
+    LOG.debug("Getting replication pipeline forReplicationType {} : 
ReplicationFactor {}",
+        replicationType.toString(), replicationFactor.toString());
     return manager.
-        getPipeline(containerName, replicationFactor, replicationType);
+        getPipeline(replicationFactor, replicationType);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
index 089a137..70489b9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
@@ -95,7 +95,7 @@ public class RatisManagerImpl extends PipelineManager {
               PipelineSelector.newPipelineFromNodes(newNodesList,
               LifeCycleState.OPEN, ReplicationType.RATIS, factor, conduitName);
           Pipeline pipeline =
-              new Pipeline("setup", pipelineChannel);
+              new Pipeline(pipelineChannel);
           try (XceiverClientRatis client =
               XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
             client.createPipeline(pipeline.getPipelineName(), newNodesList);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index e0560a1..98fe9a1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
@@ -37,6 +36,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
 import org.apache.hadoop.ozone.protocolPB
     .ScmBlockLocationProtocolServerSideTranslatorPB;
@@ -46,9 +46,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
@@ -139,20 +137,6 @@ public class SCMBlockProtocolServer implements ScmBlockLocationProtocol {
   }
 
   @Override
-  public Set<AllocatedBlock> getBlockLocations(Set<String> keys) throws
-      IOException {
-    Set<AllocatedBlock> locatedBlocks = new HashSet<>();
-    for (String key : keys) {
-      Pipeline pipeline = scm.getScmBlockManager().getBlock(key);
-      AllocatedBlock block = new AllocatedBlock.Builder().setKey(key)
-          .setPipeline(pipeline).build();
-      locatedBlocks.add(block);
-    }
-    return locatedBlocks;
-
-  }
-
-  @Override
   public AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType
       type, HddsProtos.ReplicationFactor factor, String owner) throws
       IOException {
@@ -202,7 +186,7 @@ public class SCMBlockProtocolServer implements ScmBlockLocationProtocol {
             .Result.unknownFailure;
       }
       List<DeleteBlockResult> blockResultList = new ArrayList<>();
-      for (String blockKey : keyBlocks.getBlockIDList()) {
+      for (BlockID blockKey : keyBlocks.getBlockIDList()) {
         blockResultList.add(new DeleteBlockResult(blockKey, resultCode));
       }
       results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(),

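Block addressing follows the same move away from string keys: a block is now identified by a BlockID, i.e. a (containerID, localID) pair. A hedged sketch with arbitrary sample values:

// Construct and decompose an ID-based block address.
BlockID blockID = new BlockID(1L, 100L); // sample container and local IDs
long containerID = blockID.getContainerID(); // 1
long localID = blockID.getLocalID();         // 100
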
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 42cce2f..246d053 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -21,6 +21,7 @@
  */
 package org.apache.hadoop.hdds.scm.server;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -137,32 +138,31 @@ public class SCMClientProtocolServer implements
   }
 
   @Override
-  public Pipeline allocateContainer(HddsProtos.ReplicationType
-      replicationType, HddsProtos.ReplicationFactor factor, String
-      containerName, String owner) throws IOException {
-    scm.checkAdminAccess();
+  public ContainerInfo allocateContainer(HddsProtos.ReplicationType
+      replicationType, HddsProtos.ReplicationFactor factor,
+      String owner) throws IOException {
+    getScm().checkAdminAccess();
     return scm.getScmContainerManager()
-        .allocateContainer(replicationType, factor, containerName, owner)
-        .getPipeline();
+        .allocateContainer(replicationType, factor, owner);
   }
 
   @Override
-  public Pipeline getContainer(String containerName) throws IOException {
+  public ContainerInfo getContainer(long containerID) throws IOException {
     return scm.getScmContainerManager()
-        .getContainer(containerName).getPipeline();
+        .getContainer(containerID);
   }
 
   @Override
-  public List<ContainerInfo> listContainer(String startName,
-      String prefixName, int count) throws IOException {
-    return scm.getScmContainerManager()
-        .listContainer(startName, prefixName, count);
+  public List<ContainerInfo> listContainer(long startContainerID,
+      int count) throws IOException {
+    return scm.getScmContainerManager().
+        listContainer(startContainerID, count);
   }
 
   @Override
-  public void deleteContainer(String containerName) throws IOException {
-    scm.checkAdminAccess();
-    scm.getScmContainerManager().deleteContainer(containerName);
+  public void deleteContainer(long containerID) throws IOException {
+    getScm().checkAdminAccess();
+    scm.getScmContainerManager().deleteContainer(containerID);
 
   }
 
@@ -193,12 +193,12 @@ public class SCMClientProtocolServer implements
 
   @Override
   public void notifyObjectStageChange(StorageContainerLocationProtocolProtos
-      .ObjectStageChangeRequestProto.Type type, String name,
+      .ObjectStageChangeRequestProto.Type type, long id,
       StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto.Op
           op, StorageContainerLocationProtocolProtos
       .ObjectStageChangeRequestProto.Stage stage) throws IOException {
 
-    LOG.info("Object type {} name {} op {} new stage {}", type, name, op,
+    LOG.info("Object type {} id {} op {} new stage {}", type, id, op,
         stage);
     if (type == StorageContainerLocationProtocolProtos
         .ObjectStageChangeRequestProto.Type.container) {
@@ -206,10 +206,10 @@ public class SCMClientProtocolServer implements
           .ObjectStageChangeRequestProto.Op.create) {
         if (stage == StorageContainerLocationProtocolProtos
             .ObjectStageChangeRequestProto.Stage.begin) {
-          scm.getScmContainerManager().updateContainerState(name, HddsProtos
+          scm.getScmContainerManager().updateContainerState(id, HddsProtos
               .LifeCycleEvent.CREATE);
         } else {
-          scm.getScmContainerManager().updateContainerState(name, HddsProtos
+          scm.getScmContainerManager().updateContainerState(id, HddsProtos
               .LifeCycleEvent.CREATED);
         }
       } else {
@@ -217,10 +217,10 @@ public class SCMClientProtocolServer implements
             .ObjectStageChangeRequestProto.Op.close) {
           if (stage == StorageContainerLocationProtocolProtos
               .ObjectStageChangeRequestProto.Stage.begin) {
-            scm.getScmContainerManager().updateContainerState(name, HddsProtos
+            scm.getScmContainerManager().updateContainerState(id, HddsProtos
                 .LifeCycleEvent.FINALIZE);
           } else {
-            scm.getScmContainerManager().updateContainerState(name, HddsProtos
+            scm.getScmContainerManager().updateContainerState(id, HddsProtos
                 .LifeCycleEvent.CLOSE);
           }
         }
@@ -292,6 +292,11 @@ public class SCMClientProtocolServer implements
     return resultList;
   }
 
+  @VisibleForTesting
+  public StorageContainerManager getScm() {
+    return scm;
+  }
+
   /**
    * Query the System for Nodes.
    *

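notifyObjectStageChange now drives the container state machine by ID: the begin/complete stages of create and close map onto the four lifecycle events used above. A hedged end-to-end sketch, where containerMapping is an illustrative Mapping instance:

long id = containerMapping.allocateContainer(
    HddsProtos.ReplicationType.STAND_ALONE,
    HddsProtos.ReplicationFactor.ONE, "ozone").getContainerID();
// create: begin, then complete
containerMapping.updateContainerState(id, HddsProtos.LifeCycleEvent.CREATE);
containerMapping.updateContainerState(id, HddsProtos.LifeCycleEvent.CREATED);
// close: begin (finalize), then complete
containerMapping.updateContainerState(id, HddsProtos.LifeCycleEvent.FINALIZE);
containerMapping.updateContainerState(id, HddsProtos.LifeCycleEvent.CLOSE);
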
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index af7dd3f..a7248bb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -87,7 +87,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
  * create a container, which then can be used to store data.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public final class StorageContainerManager extends ServiceRuntimeInfoImpl
+public class StorageContainerManager extends ServiceRuntimeInfoImpl
     implements SCMMXBean {
 
   private static final Logger LOG = LoggerFactory
@@ -168,8 +168,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         cacheSize);
 
     scmBlockManager =
-        new BlockManagerImpl(conf, getScmNodeManager(), scmContainerManager,
-            cacheSize);
+        new BlockManagerImpl(conf, getScmNodeManager(), scmContainerManager);
 
     scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
         .OZONE_ADMINISTRATORS);
@@ -459,9 +458,9 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   }
 
   @VisibleForTesting
-  public ContainerInfo getContainerInfo(String containerName) throws
+  public ContainerInfo getContainerInfo(long containerID) throws
       IOException {
-    return scmContainerManager.getContainer(containerName);
+    return scmContainerManager.getContainer(containerID);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index 0eff702..f3e42ea 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -40,7 +39,6 @@ import java.io.File;
 import java.io.IOException;
 import java.nio.file.Paths;
 import java.util.Collections;
-import java.util.UUID;
 
 import static org.apache.hadoop.ozone.OzoneConsts.GB;
 import static org.apache.hadoop.ozone.OzoneConsts.MB;
@@ -76,7 +74,7 @@ public class TestBlockManager {
     }
     nodeManager = new MockNodeManager(true, 10);
     mapping = new ContainerMapping(conf, nodeManager, 128);
-    blockManager = new BlockManagerImpl(conf, nodeManager, mapping, 128);
+    blockManager = new BlockManagerImpl(conf, nodeManager, mapping);
     if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
         ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){
       factor = HddsProtos.ReplicationFactor.THREE;
@@ -107,32 +105,12 @@ public class TestBlockManager {
   }
 
   @Test
-  public void testGetAllocatedBlock() throws IOException {
-    AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner);
-    Assert.assertNotNull(block);
-    Pipeline pipeline = blockManager.getBlock(block.getKey());
-    Assert.assertEquals(pipeline.getLeader().getUuid(),
-        block.getPipeline().getLeader().getUuid());
-  }
-
-  @Test
   public void testDeleteBlock() throws Exception {
     AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
         type, factor, containerOwner);
     Assert.assertNotNull(block);
-    blockManager.deleteBlocks(Collections.singletonList(block.getKey()));
-
-    // Deleted block can not be retrieved
-    thrown.expectMessage("Specified block key does not exist.");
-    blockManager.getBlock(block.getKey());
-
-    // Tombstone of the deleted block can be retrieved if it has not been
-    // cleaned yet.
-    String deletedKeyName = blockManager.getDeletedKeyName(block.getKey());
-    Pipeline pipeline = blockManager.getBlock(deletedKeyName);
-    Assert.assertEquals(pipeline.getLeader().getUuid(),
-        block.getPipeline().getLeader().getUuid());
+    blockManager.deleteBlocks(Collections.singletonList(
+        block.getBlockID()));
   }
 
   @Test
@@ -143,12 +121,6 @@ public class TestBlockManager {
         type, factor, containerOwner);
   }
 
-  @Test
-  public void testGetNoneExistentContainer() throws IOException {
-    String nonExistBlockKey = UUID.randomUUID().toString();
-    thrown.expectMessage("Specified block key does not exist.");
-    blockManager.getBlock(nonExistBlockKey);
-  }
 
   @Test
   public void testChillModeAllocateBlockFails() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index 77030cd..f872e23 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -82,17 +82,22 @@ public class TestDeletedBlockLog {
     FileUtils.deleteDirectory(testDir);
   }
 
-  private Map<String, List<String>> generateData(int dataSize) {
-    Map<String, List<String>> blockMap = new HashMap<>();
+  private Map<Long, List<Long>> generateData(int dataSize) {
+    Map<Long, List<Long>> blockMap = new HashMap<>();
     Random random = new Random(1);
+    int containerIDBase = random.nextInt(100);
+    int localIDBase = random.nextInt(1000);
     for (int i = 0; i < dataSize; i++) {
-      String containerName = "container-" + UUID.randomUUID().toString();
-      List<String> blocks = new ArrayList<>();
+      long containerID = containerIDBase + i;
+      List<Long> blocks = new ArrayList<>();
       int blockSize = random.nextInt(30) + 1;
       for (int j = 0; j < blockSize; j++)  {
-        blocks.add("block-" + UUID.randomUUID().toString());
+        long localID = localIDBase + j;
+        blocks.add(localID);
       }
-      blockMap.put(containerName, blocks);
+      blockMap.put(containerID, blocks);
     }
     return blockMap;
   }
@@ -104,7 +109,7 @@ public class TestDeletedBlockLog {
     Assert.assertEquals(0, blocks.size());
 
     // Creates 40 TX in the log.
-    for (Map.Entry<String, List<String>> entry : generateData(40).entrySet()){
+    for (Map.Entry<Long, List<Long>> entry : generateData(40).entrySet()){
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }
 
@@ -143,7 +148,7 @@ public class TestDeletedBlockLog {
     int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
 
     // Create 30 TXs in the log.
-    for (Map.Entry<String, List<String>> entry : generateData(30).entrySet()){
+    for (Map.Entry<Long, List<Long>> entry : generateData(30).entrySet()){
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }
 
@@ -172,7 +177,7 @@ public class TestDeletedBlockLog {
 
   @Test
   public void testCommitTransactions() throws Exception {
-    for (Map.Entry<String, List<String>> entry : generateData(50).entrySet()){
+    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }
     List<DeletedBlocksTransaction> blocks =
@@ -203,7 +208,7 @@ public class TestDeletedBlockLog {
     for (int i = 0; i < 100; i++) {
       int state = random.nextInt(4);
       if (state == 0) {
-        for (Map.Entry<String, List<String>> entry :
+        for (Map.Entry<Long, List<Long>> entry :
             generateData(10).entrySet()){
           deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
         }
@@ -234,7 +239,7 @@ public class TestDeletedBlockLog {
 
   @Test
   public void testPersistence() throws Exception {
-    for (Map.Entry<String, List<String>> entry : generateData(50).entrySet()){
+    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }
     // close db and reopen it again to make sure
@@ -257,10 +262,10 @@ public class TestDeletedBlockLog {
     int txNum = 10;
     int maximumAllowedTXNum = 5;
     List<DeletedBlocksTransaction> blocks = null;
-    List<String> containerNames = new LinkedList<>();
+    List<Long> containerIDs = new LinkedList<>();
 
     int count = 0;
-    String containerName = null;
+    long containerID = 0L;
     DatanodeDetails dnDd1 = DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
@@ -279,18 +284,18 @@ public class TestDeletedBlockLog {
         .build();
     Mapping mappingService = mock(ContainerMapping.class);
     // Creates {TXNum} TX in the log.
-    for (Map.Entry<String, List<String>> entry : generateData(txNum)
+    for (Map.Entry<Long, List<Long>> entry : generateData(txNum)
         .entrySet()) {
       count++;
-      containerName = entry.getKey();
-      containerNames.add(containerName);
-      deletedBlockLog.addTransaction(containerName, entry.getValue());
+      containerID = entry.getKey();
+      containerIDs.add(containerID);
+      deletedBlockLog.addTransaction(containerID, entry.getValue());
 
       // make TX[1-6] for datanode1; TX[7-10] for datanode2
       if (count <= (maximumAllowedTXNum + 1)) {
-        mockContainerInfo(mappingService, containerName, dnDd1);
+        mockContainerInfo(mappingService, containerID, dnDd1);
       } else {
-        mockContainerInfo(mappingService, containerName, dnId2);
+        mockContainerInfo(mappingService, containerID, dnId2);
       }
     }
 
@@ -325,7 +330,7 @@ public class TestDeletedBlockLog {
     DeletedBlocksTransaction.Builder builder =
         DeletedBlocksTransaction.newBuilder();
     builder.setTxID(11);
-    builder.setContainerName(containerName);
+    builder.setContainerID(containerID);
     builder.setCount(0);
     transactions.addTransaction(builder.build());
 
@@ -334,30 +339,29 @@ public class TestDeletedBlockLog {
         transactions.getDatanodeTransactions(dnId2.getUuid()).size());
 
     // Add new TX in dnID2, then dnID2 will reach maximum value.
-    containerName = "newContainer";
     builder = DeletedBlocksTransaction.newBuilder();
     builder.setTxID(12);
-    builder.setContainerName(containerName);
+    builder.setContainerID(containerID);
     builder.setCount(0);
-    mockContainerInfo(mappingService, containerName, dnId2);
+    mockContainerInfo(mappingService, containerID, dnId2);
     transactions.addTransaction(builder.build());
     // Since all nodes are full, the transactions set is full.
     Assert.assertTrue(transactions.isFull());
   }
 
-  private void mockContainerInfo(Mapping mappingService, String containerName,
+  private void mockContainerInfo(Mapping mappingService, long containerID,
       DatanodeDetails dd) throws IOException {
     PipelineChannel pipelineChannel =
         new PipelineChannel("fake", LifeCycleState.OPEN,
             ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "fake");
     pipelineChannel.addMember(dd);
-    Pipeline pipeline = new Pipeline(containerName, pipelineChannel);
+    Pipeline pipeline = new Pipeline(pipelineChannel);
 
     ContainerInfo.Builder builder = new ContainerInfo.Builder();
     builder.setPipeline(pipeline);
 
     ContainerInfo containerInfo = builder.build();
     Mockito.doReturn(containerInfo).when(mappingService)
-        .getContainer(containerName);
+        .getContainer(containerID);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index 200a611..a27068bb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -45,6 +45,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
+import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.UUID;
@@ -59,6 +60,7 @@ public class TestContainerMapping {
   private static File testDir;
   private static XceiverClientManager xceiverClientManager;
   private static String containerOwner = "OZONE";
+  private static Random random;
 
   private static final long TIMEOUT = 10000;
 
@@ -83,6 +85,7 @@ public class TestContainerMapping {
     nodeManager = new MockNodeManager(true, 10);
     mapping = new ContainerMapping(conf, nodeManager, 128);
     xceiverClientManager = new XceiverClientManager(conf);
+    random = new Random();
   }
 
   @AfterClass
@@ -103,7 +106,7 @@ public class TestContainerMapping {
     ContainerInfo containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
-        UUID.randomUUID().toString(), containerOwner);
+        containerOwner);
     Assert.assertNotNull(containerInfo);
   }
 
@@ -120,7 +123,7 @@ public class TestContainerMapping {
       ContainerInfo containerInfo = mapping.allocateContainer(
           xceiverClientManager.getType(),
           xceiverClientManager.getFactor(),
-          UUID.randomUUID().toString(), containerOwner);
+          containerOwner);
 
       Assert.assertNotNull(containerInfo);
       Assert.assertNotNull(containerInfo.getPipeline());
@@ -132,59 +135,41 @@ public class TestContainerMapping {
 
   @Test
   public void testGetContainer() throws IOException {
-    String containerName = UUID.randomUUID().toString();
-    Pipeline pipeline = mapping.allocateContainer(
+    ContainerInfo containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerName,
-        containerOwner).getPipeline();
+        xceiverClientManager.getFactor(),
+        containerOwner);
+    Pipeline pipeline = containerInfo.getPipeline();
     Assert.assertNotNull(pipeline);
-    Pipeline newPipeline = mapping.getContainer(containerName).getPipeline();
+    Pipeline newPipeline = mapping.getContainer(
+        containerInfo.getContainerID()).getPipeline();
     Assert.assertEquals(pipeline.getLeader().getUuid(),
         newPipeline.getLeader().getUuid());
   }
 
   @Test
-  public void testDuplicateAllocateContainerFails() throws IOException {
-    String containerName = UUID.randomUUID().toString();
-    Pipeline pipeline = mapping.allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerName,
-        containerOwner).getPipeline();
-    Assert.assertNotNull(pipeline);
-    thrown.expectMessage("Specified container already exists.");
-    mapping.allocateContainer(xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerName,
-        containerOwner);
-  }
-
-  @Test
   public void testgetNoneExistentContainer() throws IOException {
-    String containerName = UUID.randomUUID().toString();
     thrown.expectMessage("Specified key does not exist.");
-    mapping.getContainer(containerName);
+    mapping.getContainer(random.nextLong());
   }
 
   @Test
   public void testChillModeAllocateContainerFails() throws IOException {
-    String containerName = UUID.randomUUID().toString();
     nodeManager.setChillmode(true);
     thrown.expectMessage("Unable to create container while in chill mode");
     mapping.allocateContainer(xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerName,
-        containerOwner);
+        xceiverClientManager.getFactor(), containerOwner);
   }
 
   @Test
   public void testContainerCreationLeaseTimeout() throws IOException,
       InterruptedException {
-    String containerName = UUID.randomUUID().toString();
     nodeManager.setChillmode(false);
     ContainerInfo containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
-        containerName,
         containerOwner);
-    mapping.updateContainerState(containerInfo.getContainerName(),
+    mapping.updateContainerState(containerInfo.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
     Thread.sleep(TIMEOUT + 1000);
 
@@ -198,14 +183,13 @@ public class TestContainerMapping {
 
     thrown.expect(IOException.class);
     thrown.expectMessage("Lease Exception");
-    mapping.updateContainerState(containerInfo.getContainerName(),
+    mapping.updateContainerState(containerInfo.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);
   }
 
   @Test
   public void testFullContainerReport() throws IOException {
-    String containerName = UUID.randomUUID().toString();
-    ContainerInfo info = createContainer(containerName);
+    ContainerInfo info = createContainer();
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     ContainerReportsRequestProto.reportType reportType =
         ContainerReportsRequestProto.reportType.fullReport;
@@ -213,9 +197,7 @@ public class TestContainerMapping {
         new ArrayList<>();
     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
         StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
-    ciBuilder.setContainerName(containerName)
-        //setting some random hash
-        .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
+    ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
         .setSize(5368709120L)
         .setUsed(2000000000L)
         .setKeyCount(100000000L)
@@ -234,15 +216,14 @@ public class TestContainerMapping {
 
     mapping.processContainerReports(crBuilder.build());
 
-    ContainerInfo updatedContainer = mapping.getContainer(containerName);
+    ContainerInfo updatedContainer =
+        mapping.getContainer(info.getContainerID());
     Assert.assertEquals(100000000L, updatedContainer.getNumberOfKeys());
     Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
   }
 
   @Test
   public void testContainerCloseWithContainerReport() throws IOException {
-    String containerName = UUID.randomUUID().toString();
-    ContainerInfo info = createContainer(containerName);
+    ContainerInfo info = createContainer();
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     ContainerReportsRequestProto.reportType reportType =
         ContainerReportsRequestProto.reportType.fullReport;
@@ -251,9 +232,7 @@ public class TestContainerMapping {
 
     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
         StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
-    ciBuilder.setContainerName(containerName)
-        //setting some random hash
-        .setFinalhash("7c45eb4d7ed5e0d2e89aaab7759de02e")
+    ciBuilder.setFinalhash("7c45eb4d7ed5e0d2e89aaab7759de02e")
         .setSize(5368709120L)
         .setUsed(5368705120L)
         .setKeyCount(500000000L)
@@ -272,7 +251,7 @@ public class TestContainerMapping {
 
     mapping.processContainerReports(crBuilder.build());
 
-    ContainerInfo updatedContainer = mapping.getContainer(containerName);
+    ContainerInfo updatedContainer =
+        mapping.getContainer(info.getContainerID());
     Assert.assertEquals(500000000L, updatedContainer.getNumberOfKeys());
     Assert.assertEquals(5368705120L, updatedContainer.getUsedBytes());
     NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
@@ -287,9 +266,8 @@ public class TestContainerMapping {
 
   @Test
   public void testCloseContainer() throws IOException {
-    String containerName = UUID.randomUUID().toString();
-    ContainerInfo info = createContainer(containerName);
-    mapping.updateContainerState(containerName,
+    ContainerInfo info = createContainer();
+    mapping.updateContainerState(info.getContainerID(),
         HddsProtos.LifeCycleEvent.FINALIZE);
     NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
         .getMatchingContainerIDs(
@@ -298,7 +276,7 @@ public class TestContainerMapping {
             xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.CLOSING);
     Assert.assertTrue(pendingCloseContainers.contains(info.containerID()));
-    mapping.updateContainerState(containerName,
+    mapping.updateContainerState(info.getContainerID(),
         HddsProtos.LifeCycleEvent.CLOSE);
     NavigableSet<ContainerID> closeContainers = mapping.getStateManager()
         .getMatchingContainerIDs(
@@ -311,21 +289,18 @@ public class TestContainerMapping {
 
   /**
   * Creates a container in ContainerMapping.
-   * @param containerName
-   *          Name of the container
    * @throws IOException
    */
-  private ContainerInfo createContainer(String containerName)
+  private ContainerInfo createContainer()
       throws IOException {
     nodeManager.setChillmode(false);
     ContainerInfo containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
-        containerName,
         containerOwner);
-    mapping.updateContainerState(containerInfo.getContainerName(),
+    mapping.updateContainerState(containerInfo.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
-    mapping.updateContainerState(containerInfo.getContainerName(),
+    mapping.updateContainerState(containerInfo.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);
     return containerInfo;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index 2fec232..f3f37c7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdds.scm.container.closer;
 
-import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.scm.TestUtils;
@@ -92,15 +91,13 @@ public class TestContainerCloser {
 
   @Test
   public void testClose() throws IOException {
-    String containerName = "container-" + RandomStringUtils.randomNumeric(5);
-
     ContainerInfo info = mapping.allocateContainer(
         HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.ONE, containerName, "ozone");
+        HddsProtos.ReplicationFactor.ONE, "ozone");
 
     //Execute these state transitions so that we can close the container.
-    mapping.updateContainerState(containerName, CREATE);
-    mapping.updateContainerState(containerName, CREATED);
+    mapping.updateContainerState(info.getContainerID(), CREATE);
+    mapping.updateContainerState(info.getContainerID(), CREATED);
     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();
 
@@ -120,7 +117,7 @@ public class TestContainerCloser {
     long newUsed = (long) (size * 0.91f);
     sendContainerReport(info, newUsed);
 
-    // with only one container the  cleaner thread should not run.
+    // with only one container the cleaner thread should not run.
     Assert.assertEquals(runCount, mapping.getCloser().getThreadRunCount());
 
     // and close count will be one.
@@ -140,14 +137,13 @@ public class TestContainerCloser {
 
     configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL, 1,
         TimeUnit.SECONDS);
-    String containerName = "container-" + RandomStringUtils.randomNumeric(5);
 
     ContainerInfo info = mapping.allocateContainer(
         HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.ONE, containerName, "ozone");
+        HddsProtos.ReplicationFactor.ONE, "ozone");
 
     //Execute these state transitions so that we can close the container.
-    mapping.updateContainerState(containerName, CREATE);
+    mapping.updateContainerState(info.getContainerID(), CREATE);
 
     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();
@@ -187,12 +183,11 @@ public class TestContainerCloser {
     long runCount = mapping.getCloser().getThreadRunCount();
 
     for (int x = 0; x < ContainerCloser.getCleanupWaterMark() + 10; x++) {
-      String containerName = "container-" + RandomStringUtils.randomNumeric(7);
       ContainerInfo info = mapping.allocateContainer(
           HddsProtos.ReplicationType.STAND_ALONE,
-          HddsProtos.ReplicationFactor.ONE, containerName, "ozone");
-      mapping.updateContainerState(containerName, CREATE);
-      mapping.updateContainerState(containerName, CREATED);
+          HddsProtos.ReplicationFactor.ONE, "ozone");
+      mapping.updateContainerState(info.getContainerID(), CREATE);
+      mapping.updateContainerState(info.getContainerID(), CREATED);
       sendContainerReport(info, 5 * GIGABYTE);
     }
 
@@ -210,7 +205,7 @@ public class TestContainerCloser {
 
     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
         StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
-    ciBuilder.setContainerName(info.getContainerName())
+    ciBuilder.setContainerID(info.getContainerID())
         .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
         .setSize(size)
         .setUsed(used)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index ad50d97..6f994a9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
@@ -160,13 +161,11 @@ public class TestContainerPlacement {
 
       assertTrue(nodeManager.isOutOfChillMode());
 
-      String container1 = UUID.randomUUID().toString();
-      Pipeline pipeline1 = containerManager.allocateContainer(
+      ContainerInfo containerInfo = containerManager.allocateContainer(
           xceiverClientManager.getType(),
-          xceiverClientManager.getFactor(), container1, "OZONE")
-          .getPipeline();
+          xceiverClientManager.getFactor(), "OZONE");
       assertEquals(xceiverClientManager.getFactor().getNumber(),
-          pipeline1.getMachines().size());
+          containerInfo.getPipeline().getMachines().size());
     } finally {
       IOUtils.closeQuietly(containerManager);
       IOUtils.closeQuietly(nodeManager);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index d0839c5..ebfe978 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.container.common;
 
 import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.scm.TestUtils;
@@ -362,7 +363,7 @@ public class TestEndPoint {
    * @return
    */
   ContainerReport getRandomContainerReport() {
-    return new ContainerReport(UUID.randomUUID().toString(),
+    return new ContainerReport(RandomUtils.nextLong(),
         DigestUtils.sha256Hex("Random"));
   }
 
@@ -436,7 +437,8 @@ public class TestEndPoint {
         reportsBuilder = StorageContainerDatanodeProtocolProtos
         .ContainerReportsRequestProto.newBuilder();
     for (int x = 0; x < count; x++) {
-      ContainerReport report = new ContainerReport(UUID.randomUUID().toString(),
+      long containerID = RandomUtils.nextLong();
+      ContainerReport report = new ContainerReport(containerID,
             DigestUtils.sha256Hex("Simulated"));
       report.setKeyCount(1000);
       report.setSize(OzoneConsts.GB * 5);
@@ -445,7 +447,6 @@ public class TestEndPoint {
       report.setReadBytes(OzoneConsts.GB * 1);
       report.setWriteCount(50);
       report.setWriteBytes(OzoneConsts.GB * 2);
-      report.setContainerID(1);
 
       reportsBuilder.addReports(report.getProtoBufMessage());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
index 8eb07e6..01f70b1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
@@ -149,17 +149,17 @@ public class TestContainerSupervisor {
    */
   public void testDetectSingleContainerReplica() throws TimeoutException,
       InterruptedException {
-    String singleNodeContainer = "SingleNodeContainer";
-    String threeNodeContainer = "ThreeNodeContainer";
+    long singleNodeContainerID = 9001;
+    long threeNodeContainerID = 9003;
     InProgressPool ppool = containerSupervisor.getInProcessPoolList().get(0);
     // Only single datanode reporting that "SingleNodeContainer" exists.
     List<ContainerReportsRequestProto> clist =
-        datanodeStateManager.getContainerReport(singleNodeContainer,
+        datanodeStateManager.getContainerReport(singleNodeContainerID,
             ppool.getPool().getPoolName(), 1);
     ppool.handleContainerReport(clist.get(0));
 
     // Three nodes are going to report that ThreeNodeContainer  exists.
-    clist = datanodeStateManager.getContainerReport(threeNodeContainer,
+    clist = datanodeStateManager.getContainerReport(threeNodeContainerID,
         ppool.getPool().getPoolName(), 3);
 
     for (ContainerReportsRequestProto reportsProto : clist) {
@@ -169,9 +169,10 @@ public class TestContainerSupervisor {
         200, 1000);
     ppool.setDoneProcessing();
 
-    List<Map.Entry<String, Integer>> containers = ppool.filterContainer(p -> p
+    List<Map.Entry<Long, Integer>> containers = ppool.filterContainer(p -> p
         .getValue() == 1);
-    Assert.assertEquals(singleNodeContainer, containers.get(0).getKey());
+    Assert.assertEquals(singleNodeContainerID,
+        containers.get(0).getKey().longValue());
     int count = containers.get(0).getValue();
     Assert.assertEquals(1L, count);
   }
@@ -184,24 +185,24 @@ public class TestContainerSupervisor {
    */
   public void testDetectOverReplica() throws TimeoutException,
       InterruptedException {
-    String normalContainer = "NormalContainer";
-    String overReplicated = "OverReplicatedContainer";
-    String wayOverReplicated = "WayOverReplicated";
+    long normalContainerID = 9000;
+    long overReplicatedContainerID = 9001;
+    long wayOverReplicatedContainerID = 9002;
     InProgressPool ppool = containerSupervisor.getInProcessPoolList().get(0);
 
     List<ContainerReportsRequestProto> clist =
-        datanodeStateManager.getContainerReport(normalContainer,
+        datanodeStateManager.getContainerReport(normalContainerID,
             ppool.getPool().getPoolName(), 3);
     ppool.handleContainerReport(clist.get(0));
 
-    clist = datanodeStateManager.getContainerReport(overReplicated,
+    clist = datanodeStateManager.getContainerReport(overReplicatedContainerID,
         ppool.getPool().getPoolName(), 4);
 
     for (ContainerReportsRequestProto reportsProto : clist) {
       ppool.handleContainerReport(reportsProto);
     }
 
-    clist = datanodeStateManager.getContainerReport(wayOverReplicated,
+    clist = datanodeStateManager.getContainerReport(wayOverReplicatedContainerID,
         ppool.getPool().getPoolName(), 7);
 
     for (ContainerReportsRequestProto reportsProto : clist) {
@@ -215,7 +216,7 @@ public class TestContainerSupervisor {
         200, 1000);
     ppool.setDoneProcessing();
 
-    List<Map.Entry<String, Integer>> containers = ppool.filterContainer(p -> p
+    List<Map.Entry<Long, Integer>> containers = ppool.filterContainer(p -> p
         .getValue() > 3);
     Assert.assertEquals(2, containers.size());
   }
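
With report counts keyed by Long instead of String, filter results unbox to numeric IDs rather than comparing names. A hypothetical consumer of the same API:

    // Hypothetical: walk the over-replicated entries by numeric ID.
    List<Map.Entry<Long, Integer>> overReplicated =
        ppool.filterContainer(p -> p.getValue() > 3);
    for (Map.Entry<Long, Integer> entry : overReplicated) {
      System.out.printf("container %d seen on %d datanodes%n",
          entry.getKey(), entry.getValue());
    }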
@@ -255,14 +256,15 @@ public class TestContainerSupervisor {
               logCapturer.getOutput().contains("PoolNew"),
           200, 15 * 1000);
 
+      long newContainerID = 7001;
       // Assert that we are able to send a container report to this new
       // pool and datanode.
       List<ContainerReportsRequestProto> clist =
-          datanodeStateManager.getContainerReport("NewContainer1",
+          datanodeStateManager.getContainerReport(newContainerID,
               "PoolNew", 1);
       containerSupervisor.handleContainerReport(clist.get(0));
       GenericTestUtils.waitFor(() ->
-          inProgressLog.getOutput().contains("NewContainer1") && inProgressLog
+          inProgressLog.getOutput().contains(Long.toString(newContainerID)) && inProgressLog
               .getOutput().contains(id.getUuidString()),
           200, 10 * 1000);
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
index 26f3514..50fd18f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.container.testutils;
 
+import com.google.common.primitives.Longs;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodePoolManager;
@@ -56,13 +57,13 @@ public class ReplicationDatanodeStateManager {
 
   /**
    * Get Container Report as if it is from a datanode in the cluster.
-   * @param containerName - Container Name.
+   * @param containerID - Container ID.
    * @param poolName - Pool Name.
    * @param dataNodeCount - Datanode Count.
    * @return List of Container Reports.
    */
   public List<ContainerReportsRequestProto> getContainerReport(
-      String containerName, String poolName, int dataNodeCount) {
+      long containerID, String poolName, int dataNodeCount) {
     List<ContainerReportsRequestProto> containerList = new LinkedList<>();
     List<DatanodeDetails> nodesInPool = poolManager.getNodes(poolName);
 
@@ -75,7 +76,6 @@ public class ReplicationDatanodeStateManager {
           "required container reports");
     }
 
-    int containerID = 1;
     while (containerList.size() < dataNodeCount && nodesInPool.size() > 0) {
       DatanodeDetails id = nodesInPool.get(r.nextInt(nodesInPool.size()));
       nodesInPool.remove(id);
@@ -83,8 +83,9 @@ public class ReplicationDatanodeStateManager {
       // We return container reports only for nodes that are healthy.
       if (nodeManager.getNodeState(id) == HEALTHY) {
         ContainerInfo info = ContainerInfo.newBuilder()
-            .setContainerName(containerName)
-            .setFinalhash(DigestUtils.sha256Hex(containerName))
+            .setContainerID(containerID)
+            .setFinalhash(DigestUtils.sha256Hex(
+                Longs.toByteArray(containerID)))
             .setContainerID(containerID)
             .build();
         ContainerReportsRequestProto containerReport =
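
One detail worth noting in this hunk: the report's final hash is now computed over the eight big-endian bytes of the ID (Guava's Longs.toByteArray) instead of over a name string. In isolation:

    // Sketch: ID-derived final hash, as built above.
    long containerID = 42L;   // example value
    String finalHash =
        DigestUtils.sha256Hex(Longs.toByteArray(containerID));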

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
index 9a44525..4f3b143 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
@@ -24,7 +24,7 @@ import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 
 import java.io.IOException;
 
@@ -34,30 +34,32 @@ import java.io.IOException;
 public class CloseContainerHandler extends OzoneCommandHandler {
 
   public static final String CONTAINER_CLOSE = "close";
-  public static final String OPT_CONTAINER_NAME = "c";
+  public static final String OPT_CONTAINER_ID = "c";
 
   @Override
   public void execute(CommandLine cmd) throws IOException {
     if (!cmd.hasOption(CONTAINER_CLOSE)) {
       throw new IOException("Expecting container close");
     }
-    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+    if (!cmd.hasOption(OPT_CONTAINER_ID)) {
       displayHelp();
       if (!cmd.hasOption(SCMCLI.HELP_OP)) {
-        throw new IOException("Expecting container name");
+        throw new IOException("Expecting container id");
       } else {
         return;
       }
     }
-    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
+    String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
 
-    Pipeline pipeline = getScmClient().getContainer(containerName);
-    if (pipeline == null) {
+    ContainerInfo container = getScmClient().
+        getContainer(Long.parseLong(containerID));
+    if (container == null) {
       throw new IOException("Cannot close an non-exist container "
-          + containerName);
+          + containerID);
     }
-    logOut("Closing container : %s.", containerName);
-    getScmClient().closeContainer(pipeline);
+    logOut("Closing container : %s.", containerID);
+    getScmClient().closeContainer(container.getContainerID(),
+        container.getPipeline());
     logOut("Container closed.");
   }
 
@@ -72,8 +74,8 @@ public class CloseContainerHandler extends OzoneCommandHandler {
   }
 
   public static void addOptions(Options options) {
-    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
-        true, "Specify container name");
+    Option containerNameOpt = new Option(OPT_CONTAINER_ID,
+        true, "Specify container ID");
     options.addOption(containerNameOpt);
   }
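
End to end, the close command now resolves a ContainerInfo by numeric ID and hands both the ID and its pipeline to the client. A condensed sketch of the flow in execute() above (scmClient stands in for getScmClient()):

    // Sketch: close-by-ID, mirroring execute().
    long containerID = Long.parseLong(cmd.getOptionValue(OPT_CONTAINER_ID));
    ContainerInfo container = scmClient.getContainer(containerID);
    if (container == null) {
      throw new IOException("No such container: " + containerID);
    }
    scmClient.closeContainer(container.getContainerID(),
        container.getPipeline());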
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
index 980388f..428f179 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
@@ -119,7 +119,6 @@ public class ContainerCommandHandler extends OzoneCommandHandler {
   public static void addOptions(Options options) {
     addCommandsOption(options);
     // for create container options.
-    CreateContainerHandler.addOptions(options);
     DeleteContainerHandler.addOptions(options);
     InfoContainerHandler.addOptions(options);
     ListContainerHandler.addOptions(options);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
index 2961831..c0ff1f7 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdds.scm.cli.container;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
@@ -35,7 +34,6 @@ import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
 public class CreateContainerHandler extends OzoneCommandHandler {
 
   public static final String CONTAINER_CREATE = "create";
-  public static final String OPT_CONTAINER_NAME = "c";
   public static final String CONTAINER_OWNER = "OZONE";
   // TODO Support an optional -p <pipelineID> option to create
   // container on given datanodes.
@@ -49,33 +47,17 @@ public class CreateContainerHandler extends OzoneCommandHandler {
     if (!cmd.hasOption(CONTAINER_CREATE)) {
       throw new IOException("Expecting container create");
     }
-    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
-      displayHelp();
-      if (!cmd.hasOption(HELP_OP)) {
-        throw new IOException("Expecting container name");
-      } else {
-        return;
-      }
-    }
-    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
 
-    logOut("Creating container : %s.", containerName);
-    getScmClient().createContainer(containerName, CONTAINER_OWNER);
+    logOut("Creating container...");
+    getScmClient().createContainer(CONTAINER_OWNER);
     logOut("Container created.");
   }
 
   @Override
   public void displayHelp() {
     Options options = new Options();
-    addOptions(options);
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -create <option>",
         "where <option> is", options, "");
   }
-
-  public static void addOptions(Options options) {
-    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
-        true, "Specify container name");
-    options.addOption(containerNameOpt);
-  }
 }
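
With the name option removed, creation no longer takes any identifier from the user; SCM assigns the container ID during allocation, so the call collapses to a single owner argument:

    // Sketch: creation is owner-only now; SCM picks the container ID.
    getScmClient().createContainer(CONTAINER_OWNER);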

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
index a5b625a..20a6d9e 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
@@ -25,7 +25,7 @@ import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 
 import java.io.IOException;
 
@@ -39,7 +39,7 @@ public class DeleteContainerHandler extends OzoneCommandHandler {
 
   protected static final String CONTAINER_DELETE = "delete";
   protected static final String OPT_FORCE = "f";
-  protected static final String OPT_CONTAINER_NAME = "c";
+  protected static final String OPT_CONTAINER_ID = "c";
 
   public DeleteContainerHandler(ScmClient scmClient) {
     super(scmClient);
@@ -49,7 +49,7 @@ public class DeleteContainerHandler extends OzoneCommandHandler {
   public void execute(CommandLine cmd) throws IOException {
     Preconditions.checkArgument(cmd.hasOption(CONTAINER_DELETE),
         "Expecting command delete");
-    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+    if (!cmd.hasOption(OPT_CONTAINER_ID)) {
       displayHelp();
       if (!cmd.hasOption(HELP_OP)) {
         throw new IOException("Expecting container name");
@@ -58,17 +58,19 @@ public class DeleteContainerHandler extends OzoneCommandHandler {
       }
     }
 
-    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
+    String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
 
-    Pipeline pipeline = getScmClient().getContainer(containerName);
-    if (pipeline == null) {
+    ContainerInfo container = getScmClient().getContainer(
+        Long.parseLong(containerID));
+    if (container == null) {
       throw new IOException("Cannot delete an non-exist container "
-          + containerName);
+          + containerID);
     }
 
-    logOut("Deleting container : %s.", containerName);
-    getScmClient().deleteContainer(pipeline, cmd.hasOption(OPT_FORCE));
-    logOut("Container %s deleted.", containerName);
+    logOut("Deleting container : %s.", containerID);
+    getScmClient().deleteContainer(container.getContainerID(),
+        container.getPipeline(), cmd.hasOption(OPT_FORCE));
+    logOut("Container %s deleted.", containerID);
   }
 
   @Override
@@ -85,8 +87,8 @@ public class DeleteContainerHandler extends OzoneCommandHandler {
         false,
         "forcibly delete a container");
     options.addOption(forceOpt);
-    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
-        true, "Specify container name");
+    Option containerNameOpt = new Option(OPT_CONTAINER_ID,
+        true, "Specify container id");
     options.addOption(containerNameOpt);
   }
 }
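
Deletion follows the same resolve-then-act shape as close: look up the ContainerInfo by ID, then pass the ID, its pipeline, and the force flag to the client. Sketched (idValue and force stand in for the parsed option values):

    // Sketch: delete-by-ID, resolving the pipeline first as execute() does.
    ContainerInfo container = scmClient.getContainer(Long.parseLong(idValue));
    if (container == null) {
      throw new IOException("No such container: " + idValue);
    }
    scmClient.deleteContainer(container.getContainerID(),
        container.getPipeline(), force);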

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index c609915..36d46c0 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -24,7 +24,7 @@ import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -41,7 +41,7 @@ import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
 public class InfoContainerHandler extends OzoneCommandHandler {
 
   public static final String CONTAINER_INFO = "info";
-  protected static final String OPT_CONTAINER_NAME = "c";
+  protected static final String OPT_CONTAINER_ID = "c";
 
   /**
    * Constructs a handler object.
@@ -57,7 +57,7 @@ public class InfoContainerHandler extends OzoneCommandHandler {
     if (!cmd.hasOption(CONTAINER_INFO)) {
       throw new IOException("Expecting container info");
     }
-    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+    if (!cmd.hasOption(OPT_CONTAINER_ID)) {
       displayHelp();
       if (!cmd.hasOption(HELP_OP)) {
         throw new IOException("Expecting container name");
@@ -65,16 +65,17 @@ public class InfoContainerHandler extends OzoneCommandHandler {
         return;
       }
     }
-    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);
-    Pipeline pipeline = getScmClient().getContainer(containerName);
-    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
+    String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
+    ContainerInfo container = getScmClient().
+        getContainer(Long.parseLong(containerID));
+    Preconditions.checkNotNull(container, "Container cannot be null");
 
     ContainerData containerData =
-        getScmClient().readContainer(pipeline);
+        getScmClient().readContainer(container.getContainerID(),
+            container.getPipeline());
 
     // Print container report info.
-    logOut("Container Name: %s",
-        containerData.getName());
+    logOut("Container id: %s", containerID);
     String openStatus =
         containerData.getState() == HddsProtos.LifeCycleState.OPEN ? "OPEN" :
             "CLOSED";
@@ -91,8 +92,10 @@ public class InfoContainerHandler extends OzoneCommandHandler {
     logOut("Container Metadata: {%s}", metadataStr);
 
     // Print pipeline of an existing container.
-    logOut("LeaderID: %s", pipeline.getLeader().getHostName());
-    String machinesStr = pipeline.getMachines().stream().map(
+    logOut("LeaderID: %s", container.getPipeline()
+        .getLeader().getHostName());
+    String machinesStr = container.getPipeline()
+        .getMachines().stream().map(
         DatanodeDetails::getHostName).collect(Collectors.joining(","));
     logOut("Datanodes: [%s]", machinesStr);
   }
@@ -107,8 +110,8 @@ public class InfoContainerHandler extends OzoneCommandHandler {
   }
 
   public static void addOptions(Options options) {
-    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
-        true, "Specify container name");
-    options.addOption(containerNameOpt);
+    Option containerIdOpt = new Option(OPT_CONTAINER_ID,
+        true, "Specify container id");
+    options.addOption(containerIdOpt);
   }
 }
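
readContainer likewise takes the ID plus the pipeline resolved from it, and the report header prints the ID where the name used to be:

    // Sketch: ID-based read path used by the info command.
    ContainerInfo container = scmClient.getContainer(containerID);
    ContainerData data = scmClient.readContainer(
        container.getContainerID(), container.getPipeline());
    logOut("Container id: %s, state: %s", containerID, data.getState());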

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
index 0c7e790..42dae65 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java
@@ -40,7 +40,6 @@ public class ListContainerHandler extends OzoneCommandHandler {
 
   public static final String CONTAINER_LIST = "list";
   public static final String OPT_START_CONTAINER = "start";
-  public static final String OPT_PREFIX_CONTAINER = "prefix";
   public static final String OPT_COUNT = "count";
 
   /**
@@ -71,8 +70,7 @@ public class ListContainerHandler extends OzoneCommandHandler 
{
       }
     }
 
-    String startName = cmd.getOptionValue(OPT_START_CONTAINER);
-    String prefixName = cmd.getOptionValue(OPT_PREFIX_CONTAINER);
+    String startID = cmd.getOptionValue(OPT_START_CONTAINER);
     int count = 0;
 
     if (cmd.hasOption(OPT_COUNT)) {
@@ -84,7 +82,8 @@ public class ListContainerHandler extends OzoneCommandHandler {
     }
 
     List<ContainerInfo> containerList =
-        getScmClient().listContainer(startName, prefixName, count);
+        getScmClient().listContainer(
+            Long.parseLong(startID), count);
 
     // Output data list
     for (ContainerInfo container : containerList) {
@@ -109,13 +108,10 @@ public class ListContainerHandler extends OzoneCommandHandler {
 
   public static void addOptions(Options options) {
     Option startContainerOpt = new Option(OPT_START_CONTAINER,
-        true, "Specify start container name");
-    Option endContainerOpt = new Option(OPT_PREFIX_CONTAINER,
-        true, "Specify prefix container name");
+        true, "Specify start container id");
     Option countOpt = new Option(OPT_COUNT, true,
         "Specify count number, required");
     options.addOption(countOpt);
     options.addOption(startContainerOpt);
-    options.addOption(endContainerOpt);
   }
 }
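
Listing becomes a numeric range scan: a start container ID plus a count, with the old prefix filter gone entirely. A sketch of paging through containers (startID and count stand in for the parsed options):

    // Sketch: page through containers ordered by ID.
    List<ContainerInfo> page =
        getScmClient().listContainer(Long.parseLong(startID), count);
    for (ContainerInfo container : page) {
      logOut("container: %d", container.getContainerID());
    }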

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a43ac28/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index b82ed25..ccc5911 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -21,11 +21,12 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
@@ -260,7 +261,7 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
           storageContainerLocationClient, String requestId)
       throws IOException {
     long length = 0;
-    String containerKey;
+    long containerKey;
     ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream();
     groupInputStream.key = keyInfo.getKeyName();
     List<KsmKeyLocationInfo> keyLocationInfos =
@@ -268,20 +269,20 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
     groupInputStream.streamOffset = new long[keyLocationInfos.size()];
     for (int i = 0; i < keyLocationInfos.size(); i++) {
       KsmKeyLocationInfo ksmKeyLocationInfo = keyLocationInfos.get(i);
-      String containerName = ksmKeyLocationInfo.getContainerName();
-      Pipeline pipeline =
-          storageContainerLocationClient.getContainer(containerName);
+      BlockID blockID = ksmKeyLocationInfo.getBlockID();
+      long containerID = blockID.getContainerID();
+      ContainerInfo container =
+          storageContainerLocationClient.getContainer(containerID);
       XceiverClientSpi xceiverClient =
-          xceiverClientManager.acquireClient(pipeline);
+          xceiverClientManager.acquireClient(container.getPipeline(), containerID);
       boolean success = false;
-      containerKey = ksmKeyLocationInfo.getBlockID();
+      containerKey = ksmKeyLocationInfo.getLocalID();
       try {
         LOG.debug("get key accessing {} {}",
-            xceiverClient.getPipeline().getContainerName(), containerKey);
+            containerID, containerKey);
         groupInputStream.streamOffset[i] = length;
-        ContainerProtos.KeyData containerKeyData = OzoneContainerTranslation
-            .containerKeyDataForRead(
-                xceiverClient.getPipeline().getContainerName(), containerKey);
+        ContainerProtos.KeyData containerKeyData = OzoneContainerTranslation
+            .containerKeyDataForRead(blockID);
         ContainerProtos.GetKeyResponseProto response = ContainerProtocolCalls
             .getKey(xceiverClient, containerKeyData, requestId);
         List<ContainerProtos.ChunkInfo> chunks =
@@ -291,7 +292,7 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
         }
         success = true;
         ChunkInputStream inputStream = new ChunkInputStream(
-            containerKey, xceiverClientManager, xceiverClient,
+            ksmKeyLocationInfo.getBlockID(), xceiverClientManager, xceiverClient,
             chunks, requestId);
         groupInputStream.addStream(inputStream,
             ksmKeyLocationInfo.getLength());
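
On the read path the key location now carries a BlockID, i.e. the (containerID, localID) pair, replacing the old containerName/string-key pairing. The resolution step in the loop above reduces to:

    // Sketch: resolve the container and xceiver client from the BlockID.
    BlockID blockID = ksmKeyLocationInfo.getBlockID();
    long containerID = blockID.getContainerID();
    ContainerInfo container =
        storageContainerLocationClient.getContainer(containerID);
    XceiverClientSpi xceiverClient =
        xceiverClientManager.acquireClient(container.getPipeline(), containerID);
    long localID = ksmKeyLocationInfo.getLocalID();  // block ID within the container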

