Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 8eca9824c -> 39058dd60


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index df9e632..f5f1de4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -53,178 +53,201 @@ public class TestOzoneContainer {
     path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
         OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
     conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
-
-    MiniOzoneCluster cluster = new MiniOzoneCluster.Builder(conf)
-        .setHandlerType("distributed").build();
-
-    // We don't start Ozone Container via data node, we will do it
-    // independently in our test path.
-    Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(
-        containerName);
-    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        pipeline.getLeader().getContainerPort());
-    OzoneContainer container = new OzoneContainer(conf);
-    container.start();
-
-    XceiverClient client = new XceiverClient(pipeline, conf);
-    client.connect();
-    ContainerProtos.ContainerCommandRequestProto request =
-        ContainerTestHelper.getCreateContainerRequest(containerName);
-    ContainerProtos.ContainerCommandResponseProto response =
-        client.sendCommand(request);
-    Assert.assertNotNull(response);
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-    container.stop();
-    cluster.shutdown();
-
+    OzoneContainer container = null;
+    MiniOzoneCluster cluster = null;
+    try {
+      cluster =  new MiniOzoneCluster.Builder(conf)
+          .setHandlerType("distributed").build();
+      // We don't start Ozone Container via data node, we will do it
+      // independently in our test path.
+      Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline(
+          containerName);
+      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+          pipeline.getLeader().getContainerPort());
+      container = new OzoneContainer(conf);
+      container.start();
+
+      XceiverClient client = new XceiverClient(pipeline, conf);
+      client.connect();
+      ContainerProtos.ContainerCommandRequestProto request =
+          ContainerTestHelper.getCreateContainerRequest(containerName);
+      ContainerProtos.ContainerCommandResponseProto response =
+          client.sendCommand(request);
+      Assert.assertNotNull(response);
+      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+    } finally {
+      if (container != null) {
+        container.stop();
+      }
+      if(cluster != null) {
+        cluster.shutdown();
+      }
+    }
   }
 
   @Test
   public void testOzoneContainerViaDataNode() throws Exception {
-    String keyName = OzoneUtils.getRequestID();
-    String containerName = OzoneUtils.getRequestID();
-    OzoneConfiguration conf = new OzoneConfiguration();
-    URL p = conf.getClass().getResource("");
-    String path = p.getPath().concat(
-        TestOzoneContainer.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
-        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
-    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
-
-    // Start ozone container Via Datanode create.
-
-    Pipeline pipeline =
-        ContainerTestHelper.createSingleNodePipeline(containerName);
-    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        pipeline.getLeader().getContainerPort());
-
-    MiniOzoneCluster cluster = new MiniOzoneCluster.Builder(conf)
-        .setHandlerType("distributed").build();
-
-    // This client talks to ozone container via datanode.
-    XceiverClient client = new XceiverClient(pipeline, conf);
-    client.connect();
-
-    // Create container
-    ContainerProtos.ContainerCommandRequestProto request =
-        ContainerTestHelper.getCreateContainerRequest(containerName);
-    ContainerProtos.ContainerCommandResponseProto response =
-        client.sendCommand(request);
-    Assert.assertNotNull(response);
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
-    // Write Chunk
-    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
-        ContainerTestHelper.getWriteChunkRequest(pipeline, containerName,
-            keyName, 1024);
-
-    response = client.sendCommand(writeChunkRequest);
-    Assert.assertNotNull(response);
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
-    // Read Chunk
-    request = ContainerTestHelper.getReadChunkRequest(writeChunkRequest
-        .getWriteChunk());
-
-    response = client.sendCommand(request);
-    Assert.assertNotNull(response);
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
-    // Put Key
-    ContainerProtos.ContainerCommandRequestProto putKeyRequest =
-        
ContainerTestHelper.getPutKeyRequest(writeChunkRequest.getWriteChunk());
-
-    response = client.sendCommand(putKeyRequest);
-    Assert.assertNotNull(response);
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
-    // Get Key
-    request = ContainerTestHelper.getKeyRequest(putKeyRequest.getPutKey());
-    response = client.sendCommand(request);
-    ContainerTestHelper.verifyGetKey(request, response);
-
-
-    // Delete Key
-    request =
-        ContainerTestHelper.getDeleteKeyRequest(putKeyRequest.getPutKey());
-    response = client.sendCommand(request);
-    Assert.assertNotNull(response);
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
-    //Delete Chunk
-    request = ContainerTestHelper.getDeleteChunkRequest(writeChunkRequest
-        .getWriteChunk());
-
-    response = client.sendCommand(request);
-    Assert.assertNotNull(response);
-    Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
-    client.close();
-    cluster.shutdown();
-
+    MiniOzoneCluster cluster = null;
+    XceiverClient client = null;
+    try {
+      String keyName = OzoneUtils.getRequestID();
+      String containerName = OzoneUtils.getRequestID();
+      OzoneConfiguration conf = new OzoneConfiguration();
+      URL p = conf.getClass().getResource("");
+      String path = p.getPath().concat(
+          TestOzoneContainer.class.getSimpleName());
+      path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+          OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
+      conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+
+      // Start ozone container Via Datanode create.
+
+      Pipeline pipeline =
+          ContainerTestHelper.createSingleNodePipeline(containerName);
+      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+          pipeline.getLeader().getContainerPort());
+
+      cluster = new MiniOzoneCluster.Builder(conf)
+          .setHandlerType("distributed").build();
+
+      // This client talks to ozone container via datanode.
+      client = new XceiverClient(pipeline, conf);
+      client.connect();
+
+      // Create container
+      ContainerProtos.ContainerCommandRequestProto request =
+          ContainerTestHelper.getCreateContainerRequest(containerName);
+      pipeline.setContainerName(containerName);
+      ContainerProtos.ContainerCommandResponseProto response =
+          client.sendCommand(request);
+      Assert.assertNotNull(response);
+      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+
+      // Write Chunk
+      ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
+          ContainerTestHelper.getWriteChunkRequest(pipeline, containerName,
+              keyName, 1024);
+
+      response = client.sendCommand(writeChunkRequest);
+      Assert.assertNotNull(response);
+      Assert.assertEquals(ContainerProtos.Result.SUCCESS, 
response.getResult());
+      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+
+      // Read Chunk
+      request = ContainerTestHelper.getReadChunkRequest(writeChunkRequest
+          .getWriteChunk());
+
+      response = client.sendCommand(request);
+      Assert.assertNotNull(response);
+      Assert.assertEquals(ContainerProtos.Result.SUCCESS, 
response.getResult());
+      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+
+      // Put Key
+      ContainerProtos.ContainerCommandRequestProto putKeyRequest =
+          ContainerTestHelper.getPutKeyRequest(writeChunkRequest
+              .getWriteChunk());
+
+
+      response = client.sendCommand(putKeyRequest);
+      Assert.assertNotNull(response);
+      Assert.assertEquals(ContainerProtos.Result.SUCCESS, 
response.getResult());
+      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+
+      // Get Key
+      request = ContainerTestHelper.getKeyRequest(putKeyRequest.getPutKey());
+      response = client.sendCommand(request);
+      ContainerTestHelper.verifyGetKey(request, response);
+
+
+      // Delete Key
+      request =
+          ContainerTestHelper.getDeleteKeyRequest(putKeyRequest.getPutKey());
+      response = client.sendCommand(request);
+      Assert.assertNotNull(response);
+      Assert.assertEquals(ContainerProtos.Result.SUCCESS, 
response.getResult());
+      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+
+      //Delete Chunk
+      request = ContainerTestHelper.getDeleteChunkRequest(writeChunkRequest
+          .getWriteChunk());
+
+      response = client.sendCommand(request);
+      Assert.assertNotNull(response);
+      Assert.assertEquals(ContainerProtos.Result.SUCCESS, 
response.getResult());
+      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+    } finally {
+      if (client != null) {
+        client.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
   }
 
   @Test
   public void testBothGetandPutSmallFile() throws Exception {
-    String keyName = OzoneUtils.getRequestID();
-    String containerName = OzoneUtils.getRequestID();
-    OzoneConfiguration conf = new OzoneConfiguration();
-    URL p = conf.getClass().getResource("");
-    String path = p.getPath().concat(
-        TestOzoneContainer.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
-        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
-    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
-
-    // Start ozone container Via Datanode create.
-
-    Pipeline pipeline =
-        ContainerTestHelper.createSingleNodePipeline(containerName);
-    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        pipeline.getLeader().getContainerPort());
-
-    MiniOzoneCluster cluster = new MiniOzoneCluster.Builder(conf)
-        .setHandlerType("distributed").build();
-
-    // This client talks to ozone container via datanode.
-    XceiverClient client = new XceiverClient(pipeline, conf);
-    client.connect();
-
-    // Create container
-    ContainerProtos.ContainerCommandRequestProto request =
-        ContainerTestHelper.getCreateContainerRequest(containerName);
-    ContainerProtos.ContainerCommandResponseProto response =
-        client.sendCommand(request);
-    Assert.assertNotNull(response);
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
-
-    ContainerProtos.ContainerCommandRequestProto smallFileRequest =
-        ContainerTestHelper.getWriteSmallFileRequest(pipeline, containerName,
-            keyName, 1024);
-
-
-    response = client.sendCommand(smallFileRequest);
-    Assert.assertNotNull(response);
-    Assert.assertTrue(smallFileRequest.getTraceID()
-        .equals(response.getTraceID()));
-
-    ContainerProtos.ContainerCommandRequestProto getSmallFileRequest =
-        ContainerTestHelper.getReadSmallFileRequest(smallFileRequest
-            .getPutSmallFile().getKey());
-    response = client.sendCommand(getSmallFileRequest);
-    Assert.assertArrayEquals(
-        smallFileRequest.getPutSmallFile().getData().toByteArray(),
-        response.getGetSmallFile().getData().getData().toByteArray());
-
-    cluster.shutdown();
-
-
+    MiniOzoneCluster cluster = null;
+    XceiverClient client = null;
+    try {
+      String keyName = OzoneUtils.getRequestID();
+      String containerName = OzoneUtils.getRequestID();
+      OzoneConfiguration conf = new OzoneConfiguration();
+      URL p = conf.getClass().getResource("");
+      String path = p.getPath().concat(
+          TestOzoneContainer.class.getSimpleName());
+      path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+          OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
+      conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+
+      // Start ozone container Via Datanode create.
+
+      Pipeline pipeline =
+          ContainerTestHelper.createSingleNodePipeline(containerName);
+      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+          pipeline.getLeader().getContainerPort());
+
+      cluster = new MiniOzoneCluster.Builder(conf)
+          .setHandlerType("distributed").build();
+
+      // This client talks to ozone container via datanode.
+      client = new XceiverClient(pipeline, conf);
+      client.connect();
+
+      // Create container
+      ContainerProtos.ContainerCommandRequestProto request =
+          ContainerTestHelper.getCreateContainerRequest(containerName);
+      ContainerProtos.ContainerCommandResponseProto response =
+          client.sendCommand(request);
+      Assert.assertNotNull(response);
+      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
+
+
+      ContainerProtos.ContainerCommandRequestProto smallFileRequest =
+          ContainerTestHelper.getWriteSmallFileRequest(pipeline, containerName,
+              keyName, 1024);
+
+
+      response = client.sendCommand(smallFileRequest);
+      Assert.assertNotNull(response);
+      Assert.assertTrue(smallFileRequest.getTraceID()
+          .equals(response.getTraceID()));
+
+      ContainerProtos.ContainerCommandRequestProto getSmallFileRequest =
+          ContainerTestHelper.getReadSmallFileRequest(smallFileRequest
+              .getPutSmallFile().getKey());
+      response = client.sendCommand(getSmallFileRequest);
+      Assert.assertArrayEquals(
+          smallFileRequest.getPutSmallFile().getData().toByteArray(),
+          response.getGetSmallFile().getData().getData().toByteArray());
+    } finally {
+      if (client != null) {
+        client.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
index 58d51a2..5a2dd2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
@@ -21,6 +21,7 @@ import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
 import 
org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.junit.AfterClass;
@@ -45,12 +46,15 @@ public class TestAllocateContainer {
   public ExpectedException thrown = ExpectedException.none();
 
   @BeforeClass
-  public static void init() throws IOException {
+  public static void init() throws Exception {
+    long datanodeCapacities = 3 * OzoneConsts.TB;
     conf = new OzoneConfiguration();
     cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(1)
+        .storageCapacities(new long[] {datanodeCapacities, datanodeCapacities})
         .setHandlerType("distributed").build();
     storageContainerLocationClient =
         cluster.createStorageContainerLocationClient();
+    cluster.waitForHeartbeatProcessed();
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index a01edd1..8a8ea68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -21,6 +21,10 @@ import 
org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.scm.container.ContainerPlacementPolicy;
+import org.apache.hadoop.ozone.scm.container.SCMContainerPlacementCapacity;
+import org.apache.hadoop.scm.ScmConfigKeys;
 import org.apache.hadoop.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.scm.XceiverClientManager;
@@ -35,7 +39,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
-import java.io.IOException;
 import java.util.UUID;
 
 /**
@@ -52,13 +55,18 @@ public class TestContainerSmallFile {
   private static XceiverClientManager xceiverClientManager;
 
   @BeforeClass
-  public static void init() throws IOException {
+  public static void init() throws Exception {
+    long datanodeCapacities = 3 * OzoneConsts.TB;
     ozoneConfig = new OzoneConfiguration();
-    cluster = new MiniOzoneCluster.Builder(ozoneConfig)
-        .numDataNodes(1).setHandlerType("distributed").build();
+    ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
+        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
+    cluster = new MiniOzoneCluster.Builder(ozoneConfig).numDataNodes(1)
+        .storageCapacities(new long[] {datanodeCapacities, datanodeCapacities})
+        .setHandlerType("distributed").build();
     storageContainerLocationClient = cluster
         .createStorageContainerLocationClient();
     xceiverClientManager = new XceiverClientManager(ozoneConfig);
+    cluster.waitForHeartbeatProcessed();
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/MockNodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/MockNodeManager.java
index 2e55046..14c36fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/MockNodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/container/MockNodeManager.java
@@ -188,15 +188,35 @@ public class MockNodeManager implements NodeManager {
   }
 
   /**
-   * Return a list of node stats.
+   * Return a map of nodes to their stats.
    * @return a list of individual node stats (live/stale but not dead).
    */
   @Override
-  public List<SCMNodeStat> getNodeStats() {
+  public Map<String, SCMNodeStat> getNodeStats() {
     return null;
   }
 
   /**
+   * Return the node stat of the specified datanode.
+   * @param datanodeID - datanode ID.
+   * @return node stat if it is live/stale, null if it is dead or doesn't exist.
+   */
+  @Override
+  public SCMNodeStat getNodeStat(DatanodeID datanodeID) {
+    return null;
+  }
+
+  /**
+   * Used for testing.
+   *
+   * @return true if the HB check is done.
+   */
+  @Override
+  public boolean waitForHeartbeatProcessed() {
+    return false;
+  }
+
+  /**
    * Closes this stream and releases any system resources associated with it. 
If
    * the stream is already closed then invoking this method has no effect.
    * <p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java
new file mode 100644
index 0000000..9e99d70
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java
@@ -0,0 +1,191 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.scm.node;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import 
org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.ozone.scm.container.ContainerMapping;
+import org.apache.hadoop.ozone.scm.container.ContainerPlacementPolicy;
+import org.apache.hadoop.ozone.scm.container.SCMContainerPlacementCapacity;
+import org.apache.hadoop.scm.ScmConfigKeys;
+import org.apache.hadoop.scm.client.ScmClient;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+
+import static 
org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
+import static org.apache.hadoop.ozone.scm.node.NodeManager.NODESTATE.HEALTHY;
+import static org.hamcrest.core.StringStartsWith.startsWith;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test for different container placement policy.
+ */
+public class TestContainerPlacement {
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  /**
+   * Returns a new copy of Configuration.
+   *
+   * @return Config
+   */
+  Configuration getConf() {
+    return new OzoneConfiguration();
+  }
+
+  /**
+   * Creates a NodeManager.
+   *
+   * @param config - Config for the node manager.
+   * @return SCNNodeManager
+   * @throws IOException
+   */
+
+  SCMNodeManager createNodeManager(Configuration config) throws IOException {
+    SCMNodeManager nodeManager = new SCMNodeManager(config,
+        UUID.randomUUID().toString());
+    assertFalse("Node manager should be in chill mode",
+        nodeManager.isOutOfNodeChillMode());
+    return nodeManager;
+  }
+
+  ContainerMapping createContainerManager(Configuration config,
+      NodeManager scmNodeManager) throws IOException {
+    final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
+        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
+    return new ContainerMapping(config, scmNodeManager, cacheSize);
+
+  }
+  /**
+   * Test capacity based container placement policy with node reports.
+   *
+   * @throws IOException
+   * @throws InterruptedException
+   * @throws TimeoutException
+   */
+  @Test
+  public void testContainerPlacementCapacity() throws IOException,
+      InterruptedException, TimeoutException {
+    Configuration conf = getConf();
+    final int nodeCount = 4;
+    final long capacity = 10L * OzoneConsts.GB;
+    final long used = 2L * OzoneConsts.GB;
+    final long remaining = capacity - used;
+
+    final File testDir = PathUtils.getTestDir(
+        TestContainerPlacement.class);
+    conf.set(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS,
+        testDir.getAbsolutePath());
+    conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
+        SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class);
+
+    SCMNodeManager nodeManager = createNodeManager(conf);
+    ContainerMapping containerManager =
+        createContainerManager(conf, nodeManager);
+    List<DatanodeID> datanodes = new ArrayList<>(nodeCount);
+    for (int i = 0; i < nodeCount; i++) {
+      datanodes.add(SCMTestUtils.getDatanodeID(nodeManager));
+    }
+
+    try {
+      for (DatanodeID datanodeID: datanodes) {
+        StorageContainerDatanodeProtocolProtos.SCMNodeReport.Builder nrb =
+            StorageContainerDatanodeProtocolProtos.SCMNodeReport.newBuilder();
+        StorageContainerDatanodeProtocolProtos.SCMStorageReport.Builder srb =
+            StorageContainerDatanodeProtocolProtos.SCMStorageReport
+                .newBuilder();
+        srb.setStorageUuid(UUID.randomUUID().toString());
+        srb.setCapacity(capacity).setScmUsed(used).
+            setRemaining(remaining).build();
+        nodeManager.sendHeartbeat(datanodeID,
+            nrb.addStorageReport(srb).build());
+      }
+
+      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
+          100, 4 * 1000);
+      assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
+      assertEquals(capacity * nodeCount,
+          nodeManager.getStats().getCapacity());
+      assertEquals(used * nodeCount,
+          nodeManager.getStats().getScmUsed());
+      assertEquals(remaining * nodeCount,
+          nodeManager.getStats().getRemaining());
+
+      assertTrue(nodeManager.isOutOfNodeChillMode());
+
+      String container1 = UUID.randomUUID().toString();
+      Pipeline pipeline1 = containerManager.allocateContainer(container1,
+          ScmClient.ReplicationFactor.THREE);
+      assertEquals(3, pipeline1.getMachines().size());
+
+      final long newUsed = 7L * OzoneConsts.GB;
+      final long newRemaining = capacity - newUsed;
+
+      for (DatanodeID datanodeID: datanodes) {
+        StorageContainerDatanodeProtocolProtos.SCMNodeReport.Builder nrb =
+            StorageContainerDatanodeProtocolProtos.SCMNodeReport.newBuilder();
+        StorageContainerDatanodeProtocolProtos.SCMStorageReport.Builder srb =
+            StorageContainerDatanodeProtocolProtos.SCMStorageReport
+                .newBuilder();
+        srb.setStorageUuid(UUID.randomUUID().toString());
+        srb.setCapacity(capacity).setScmUsed(newUsed).
+            setRemaining(newRemaining).build();
+        nodeManager.sendHeartbeat(datanodeID,
+            nrb.addStorageReport(srb).build());
+      }
+
+      GenericTestUtils.waitFor(() -> nodeManager.getStats().getRemaining() ==
+              nodeCount * newRemaining,
+          100, 4 * 1000);
+
+      thrown.expect(IOException.class);
+      thrown.expectMessage(
+          startsWith("No healthy node found with enough remaining capacity to" +
+              " allocate container."));
+      String container2 = UUID.randomUUID().toString();
+      containerManager.allocateContainer(container2,
+          ScmClient.ReplicationFactor.THREE);
+    } finally {
+      IOUtils.closeQuietly(containerManager);
+      IOUtils.closeQuietly(nodeManager);
+      FileUtil.fullyDelete(testDir);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
index 8d5d0e0..15586bc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java
@@ -111,11 +111,11 @@ public class TestNodeManager {
       }
 
       // Wait for 4 seconds max.
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatThead(), 100,
-          4 * 1000);
+      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
+          100, 4 * 1000);
 
-      assertTrue("Heartbeat thread should have picked up the scheduled " +
-              "heartbeats and transitioned out of chill mode.",
+      assertTrue("Heartbeat thread should have picked up the" +
+              " scheduled heartbeats and transitioned out of chill mode.",
           nodeManager.isOutOfNodeChillMode());
     }
   }
@@ -132,10 +132,10 @@ public class TestNodeManager {
       InterruptedException, TimeoutException {
 
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatThead(), 100,
-          4 * 1000);
-      assertFalse("No heartbeats, Node manager should have been in chill 
mode.",
-          nodeManager.isOutOfNodeChillMode());
+      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
+          100, 4 * 1000);
+      assertFalse("No heartbeats, Node manager should have been in" +
+              " chill mode.", nodeManager.isOutOfNodeChillMode());
     }
   }
 
@@ -154,10 +154,10 @@ public class TestNodeManager {
       // Need 100 nodes to come out of chill mode, only one node is sending HB.
       nodeManager.setMinimumChillModeNodes(100);
       nodeManager.sendHeartbeat(SCMTestUtils.getDatanodeID(nodeManager), null);
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatThead(), 100,
-          4 * 1000);
-      assertFalse("Not enough heartbeat, Node manager should have been in " +
-          "chillmode.", nodeManager.isOutOfNodeChillMode());
+      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
+          100, 4 * 1000);
+      assertFalse("Not enough heartbeat, Node manager should have " +
+          "been in chillmode.", nodeManager.isOutOfNodeChillMode());
     }
   }
 
@@ -182,10 +182,10 @@ public class TestNodeManager {
         nodeManager.sendHeartbeat(datanodeID, null);
       }
 
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatThead(), 100,
-          4 * 1000);
-      assertFalse("Not enough nodes have send heartbeat to node manager.",
-          nodeManager.isOutOfNodeChillMode());
+      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
+          100, 4 * 1000);
+      assertFalse("Not enough nodes have sent heartbeat to node " +
+              "manager.", nodeManager.isOutOfNodeChillMode());
     }
   }
 
@@ -237,8 +237,8 @@ public class TestNodeManager {
         DatanodeID datanodeID = SCMTestUtils.getDatanodeID(nodeManager);
         nodeManager.sendHeartbeat(datanodeID, null);
       }
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatThead(), 100,
-          4 * 1000);
+      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
+          100, 4 * 1000);
       assertEquals(count, nodeManager.getNodeCount(HEALTHY));
     }
   }
@@ -339,9 +339,10 @@ public class TestNodeManager {
 
       List<DatanodeID> staleNodeList = nodeManager.getNodes(NodeManager
           .NODESTATE.STALE);
-      assertEquals("Expected to find 1 stale node", 1, nodeManager
-          .getNodeCount(STALE));
-      assertEquals("Expected to find 1 stale node", 1, staleNodeList.size());
+      assertEquals("Expected to find 1 stale node",
+          1, nodeManager.getNodeCount(STALE));
+      assertEquals("Expected to find 1 stale node",
+          1, staleNodeList.size());
       assertEquals("Stale node is not the expected ID", staleNode
           .getDatanodeUuid(), staleNodeList.get(0).getDatanodeUuid());
     }
@@ -403,7 +404,8 @@ public class TestNodeManager {
       List<DatanodeID> deadNodeList = nodeManager.getNodes(DEAD);
       assertEquals("Expected to find 1 dead node", 1,
           nodeManager.getNodeCount(DEAD));
-      assertEquals("Expected to find 1 dead node", 1, deadNodeList.size());
+      assertEquals("Expected to find 1 dead node",
+          1, deadNodeList.size());
       assertEquals("Dead node is not the expected ID", deadNode
           .getDatanodeUuid(), deadNodeList.get(0).getDatanodeUuid());
     }
@@ -424,8 +426,8 @@ public class TestNodeManager {
           GenericTestUtils.LogCapturer.captureLogs(SCMNodeManager.LOG);
       nodeManager.sendHeartbeat(null, null);
       logCapturer.stopCapturing();
-      assertThat(logCapturer.getOutput(), containsString("Datanode ID in " +
-          "heartbeat is null"));
+      assertThat(logCapturer.getOutput(),
+          containsString("Datanode ID in heartbeat is null"));
     }
   }
 
@@ -569,15 +571,18 @@ public class TestNodeManager {
       assertEquals(1, nodeManager.getNodeCount(STALE));
       assertEquals(1, nodeManager.getNodeCount(DEAD));
 
-      assertEquals("Expected one healthy node", 1, healthyList.size());
+      assertEquals("Expected one healthy node",
+          1, healthyList.size());
       assertEquals("Healthy node is not the expected ID", healthyNode
           .getDatanodeUuid(), healthyList.get(0).getDatanodeUuid());
 
-      assertEquals("Expected one stale node", 1, staleList.size());
+      assertEquals("Expected one stale node",
+          1, staleList.size());
       assertEquals("Stale node is not the expected ID", staleNode
           .getDatanodeUuid(), staleList.get(0).getDatanodeUuid());
 
-      assertEquals("Expected one dead node", 1, deadList.size());
+      assertEquals("Expected one dead node",
+          1, deadList.size());
       assertEquals("Dead node is not the expected ID", deadNode
           .getDatanodeUuid(), deadList.get(0).getDatanodeUuid());
       /**
@@ -781,8 +786,8 @@ public class TestNodeManager {
 
       GenericTestUtils.waitFor(() -> findNodes(nodeManager, staleCount, STALE),
           500, 20 * 1000);
-      assertEquals("Node count mismatch", healthyCount + staleCount, 
nodeManager
-          .getAllNodes().size());
+      assertEquals("Node count mismatch",
+          healthyCount + staleCount, nodeManager.getAllNodes().size());
 
       thread1.interrupt();
       thread2.interrupt();
@@ -921,8 +926,8 @@ public class TestNodeManager {
         nodeManager.sendHeartbeat(datanodeID,
             nrb.addStorageReport(srb).build());
       }
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatThead(), 100,
-          4 * 1000);
+      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
+          100, 4 * 1000);
       assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
       assertEquals(capacity * nodeCount,
           nodeManager.getStats().getCapacity());
@@ -984,11 +989,18 @@ public class TestNodeManager {
 
       // Test NodeManager#getNodeStats
       assertEquals(nodeCount, nodeManager.getNodeStats().size());
-      assertEquals(capacity, nodeManager.getNodeStats().get(0).getCapacity());
+      assertEquals(capacity, nodeManager.getNodeStat(datanodeID).getCapacity());
       assertEquals(expectedScmUsed,
-          nodeManager.getNodeStats().get(0).getScmUsed());
+          nodeManager.getNodeStat(datanodeID).getScmUsed());
       assertEquals(expectedRemaining,
-          nodeManager.getNodeStats().get(0).getRemaining());
+          nodeManager.getNodeStat(datanodeID).getRemaining());
+
+      // Compare the result from
+      // NodeManager#getNodeStats and NodeManager#getNodeStat
+      SCMNodeStat stat1 = nodeManager.getNodeStats().
+          get(datanodeID.getDatanodeUuid());
+      SCMNodeStat stat2 = nodeManager.getNodeStat(datanodeID);
+      assertEquals(stat1, stat2);
 
       // Wait up to 4s so that the node becomes stale
       // Verify the usage info should be unchanged.
@@ -996,11 +1008,11 @@ public class TestNodeManager {
           () -> nodeManager.getNodeCount(NodeManager.NODESTATE.STALE) == 1, 
100,
           4 * 1000);
       assertEquals(nodeCount, nodeManager.getNodeStats().size());
-      assertEquals(capacity, nodeManager.getNodeStats().get(0).getCapacity());
+      assertEquals(capacity, nodeManager.getNodeStat(datanodeID).getCapacity());
       assertEquals(expectedScmUsed,
-          nodeManager.getNodeStats().get(0).getScmUsed());
+          nodeManager.getNodeStat(datanodeID).getScmUsed());
       assertEquals(expectedRemaining,
-          nodeManager.getNodeStats().get(0).getRemaining());
+          nodeManager.getNodeStat(datanodeID).getRemaining());
 
       // Wait up to 4 more seconds so the node becomes dead
       // Verify usage info should be updated.
@@ -1031,11 +1043,11 @@ public class TestNodeManager {
           () -> nodeManager.getStats().getScmUsed() == expectedScmUsed, 100,
           4 * 1000);
       assertEquals(nodeCount, nodeManager.getNodeStats().size());
-      assertEquals(capacity, nodeManager.getNodeStats().get(0).getCapacity());
+      assertEquals(capacity, nodeManager.getNodeStat(datanodeID).getCapacity());
       assertEquals(expectedScmUsed,
-          nodeManager.getNodeStats().get(0).getScmUsed());
+          nodeManager.getNodeStat(datanodeID).getScmUsed());
       assertEquals(expectedRemaining,
-          nodeManager.getNodeStats().get(0).getRemaining());
+          nodeManager.getNodeStat(datanodeID).getRemaining());
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
index afe22df..56db6fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
@@ -268,7 +268,7 @@ public class TestOzoneVolumes {
    *
    * @throws IOException
    */
-  @Test
+  //@Test
   public void testCreateVolumesInLoop() throws IOException {
     SimpleDateFormat format =
         new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39058dd6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
index eb7db6f..92031d3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.ozone.web.client;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -156,7 +158,8 @@ public class TestVolume {
     assertTrue(ovols.size() >= 10);
   }
 
-  @Test
+  //@Test
+  // Takes 3m to run, disable for now.
   public void testListVolumePagination() throws OzoneException, IOException {
     final int volCount = 2000;
     final int step = 100;
@@ -179,15 +182,16 @@ public class TestVolume {
     Assert.assertEquals(volCount / step, pagecount);
   }
 
-
-  @Test
+  //@Test
   public void testListAllVolumes() throws OzoneException, IOException {
     final int volCount = 200;
     final int step = 10;
     client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);
     for (int x = 0; x < volCount; x++) {
-      String userName = "frodo" + x;
-      String volumeName = "vol"+ x;
+      String userName = "frodo" +
+          RandomStringUtils.randomAlphabetic(5).toLowerCase();
+      String volumeName = "vol" +
+          RandomStringUtils.randomAlphabetic(5).toLowerCase();
       OzoneVolume vol = client.createVolume(volumeName, userName, "100TB");
       assertNotNull(vol);
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to