http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
deleted file mode 100644
index 24a16c7..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ /dev/null
@@ -1,433 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.mockito.Mockito;
-import static org.mockito.Mockito.when;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol
-    .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol
-    .proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.protocol
-    .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
-import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * Stateless helper functions to handle SCM/datanode connections.
- */
-public final class TestUtils {
-
-  private static ThreadLocalRandom random = ThreadLocalRandom.current();
-
-  private TestUtils() {
-  }
-
-  /**
-   * Creates DatanodeDetails with random UUID.
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails randomDatanodeDetails() {
-    return createDatanodeDetails(UUID.randomUUID());
-  }
-
-  /**
-   * Creates DatanodeDetails using the given UUID.
-   *
-   * @param uuid Datanode's UUID
-   *
-   * @return DatanodeDetails
-   */
-  private static DatanodeDetails createDatanodeDetails(UUID uuid) {
-    String ipAddress = random.nextInt(256)
-        + "." + random.nextInt(256)
-        + "." + random.nextInt(256)
-        + "." + random.nextInt(256);
-    return createDatanodeDetails(uuid.toString(), "localhost", ipAddress);
-  }
-
-  /**
-   * Generates DatanodeDetails from RegisteredCommand.
-   *
-   * @param registeredCommand registration response from SCM
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails getDatanodeDetails(
-      RegisteredCommand registeredCommand) {
-    return createDatanodeDetails(registeredCommand.getDatanodeUUID(),
-        registeredCommand.getHostName(), registeredCommand.getIpAddress());
-  }
-
-  /**
-   * Creates DatanodeDetails with the given information.
-   *
-   * @param uuid      Datanode's UUID
-   * @param hostname  hostname of Datanode
-   * @param ipAddress ip address of Datanode
-   *
-   * @return DatanodeDetails
-   */
-  private static DatanodeDetails createDatanodeDetails(String uuid,
-      String hostname, String ipAddress) {
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(uuid)
-        .setHostName(hostname)
-        .setIpAddress(ipAddress)
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
-  }
-
-  /**
-   * Creates a random DatanodeDetails and registers it with the given
-   * NodeManager.
-   *
-   * @param nodeManager NodeManager
-   *
-   * @return DatanodeDetails
-   */
-  public static DatanodeDetails createRandomDatanodeAndRegister(
-      SCMNodeManager nodeManager) {
-    return getDatanodeDetails(
-        nodeManager.register(randomDatanodeDetails(), null,
-                getRandomPipelineReports()));
-  }
-
-  /**
-   * Get specified number of DatanodeDetails and register them with node
-   * manager.
-   *
-   * @param nodeManager node manager to register the datanode ids.
-   * @param count       number of DatanodeDetails needed.
-   *
-   * @return list of DatanodeDetails
-   */
-  public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
-      SCMNodeManager nodeManager, int count) {
-    ArrayList<DatanodeDetails> datanodes = new ArrayList<>();
-    for (int i = 0; i < count; i++) {
-      datanodes.add(createRandomDatanodeAndRegister(nodeManager));
-    }
-    return datanodes;
-  }
-
-  /**
-   * Generates a random NodeReport.
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto getRandomNodeReport() {
-    return getRandomNodeReport(1);
-  }
-
-  /**
-   * Generates random NodeReport with the given number of storage reports in it.
-   *
-   * @param numberOfStorageReport number of storage reports this node report
-   *                              should have
-   * @return NodeReportProto
-   */
-  public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
-    UUID nodeId = UUID.randomUUID();
-    return getRandomNodeReport(nodeId, File.separator + nodeId,
-        numberOfStorageReport);
-  }
-
-  /**
-   * Generates random NodeReport for the given nodeId with the given
-   * base path and number of storage reports in it.
-   *
-   * @param nodeId                datanode id
-   * @param basePath              base path of storage directory
-   * @param numberOfStorageReport number of storage reports
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto getRandomNodeReport(UUID nodeId,
-      String basePath, int numberOfStorageReport) {
-    List<StorageReportProto> storageReports = new ArrayList<>();
-    for (int i = 0; i < numberOfStorageReport; i++) {
-      storageReports.add(getRandomStorageReport(nodeId,
-          basePath + File.separator + i));
-    }
-    return createNodeReport(storageReports);
-  }
-
-  /**
-   * Creates NodeReport with the given storage reports.
-   *
-   * @param reports one or more storage reports
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto createNodeReport(
-      StorageReportProto... reports) {
-    return createNodeReport(Arrays.asList(reports));
-  }
-
-  /**
-   * Creates NodeReport with the given storage reports.
-   *
-   * @param reports storage reports to be included in the node report.
-   *
-   * @return NodeReportProto
-   */
-  public static NodeReportProto createNodeReport(
-      List<StorageReportProto> reports) {
-    NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder();
-    nodeReport.addAllStorageReport(reports);
-    return nodeReport.build();
-  }
-
-  /**
-   * Generates random storage report.
-   *
-   * @param nodeId datanode id to which the storage report belongs
-   * @param path   path of the storage
-   *
-   * @return StorageReportProto
-   */
-  public static StorageReportProto getRandomStorageReport(UUID nodeId,
-      String path) {
-    return createStorageReport(nodeId, path,
-        random.nextInt(1000),
-        random.nextInt(500),
-        random.nextInt(500),
-        StorageTypeProto.DISK);
-  }
-
-  /**
-   * Creates storage report with the given information.
-   *
-   * @param nodeId    datanode id
-   * @param path      storage dir
-   * @param capacity  storage size
-   * @param used      space used
-   * @param remaining space remaining
-   * @param type      type of storage
-   *
-   * @return StorageReportProto
-   */
-  public static StorageReportProto createStorageReport(UUID nodeId, String path,
-      long capacity, long used, long remaining, StorageTypeProto type) {
-    Preconditions.checkNotNull(nodeId);
-    Preconditions.checkNotNull(path);
-    StorageReportProto.Builder srb = StorageReportProto.newBuilder();
-    srb.setStorageUuid(nodeId.toString())
-        .setStorageLocation(path)
-        .setCapacity(capacity)
-        .setScmUsed(used)
-        .setRemaining(remaining);
-    StorageTypeProto storageTypeProto =
-        type == null ? StorageTypeProto.DISK : type;
-    srb.setStorageType(storageTypeProto);
-    return srb.build();
-  }
-
-
-  /**
-   * Generates random container reports.
-   *
-   * @return ContainerReportsProto
-   */
-  public static ContainerReportsProto getRandomContainerReports() {
-    return getRandomContainerReports(1);
-  }
-
-  /**
-   * Generates random container report with the given number of containers.
-   *
-   * @param numberOfContainers number of containers to be in container report
-   *
-   * @return ContainerReportsProto
-   */
-  public static ContainerReportsProto getRandomContainerReports(
-      int numberOfContainers) {
-    List<ContainerInfo> containerInfos = new ArrayList<>();
-    for (int i = 0; i < numberOfContainers; i++) {
-      containerInfos.add(getRandomContainerInfo(i));
-    }
-    return getContainerReports(containerInfos);
-  }
-
-
-  public static PipelineReportsProto getRandomPipelineReports() {
-    return PipelineReportsProto.newBuilder().build();
-  }
-
-  /**
-   * Creates container report with the given ContainerInfo(s).
-   *
-   * @param containerInfos one or more ContainerInfo
-   *
-   * @return ContainerReportsProto
-   */
-  public static ContainerReportsProto getContainerReports(
-      ContainerInfo... containerInfos) {
-    return getContainerReports(Arrays.asList(containerInfos));
-  }
-
-  /**
-   * Creates container report with the given ContainerInfo(s).
-   *
-   * @param containerInfos list of ContainerInfo
-   *
-   * @return ContainerReportsProto
-   */
-  public static ContainerReportsProto getContainerReports(
-      List<ContainerInfo> containerInfos) {
-    ContainerReportsProto.Builder
-        reportsBuilder = ContainerReportsProto.newBuilder();
-    for (ContainerInfo containerInfo : containerInfos) {
-      reportsBuilder.addReports(containerInfo);
-    }
-    return reportsBuilder.build();
-  }
-
-  /**
-   * Generates random ContainerInfo.
-   *
-   * @param containerId container id of the ContainerInfo
-   *
-   * @return ContainerInfo
-   */
-  public static ContainerInfo getRandomContainerInfo(long containerId) {
-    return createContainerInfo(containerId,
-        OzoneConsts.GB * 5,
-        random.nextLong(1000),
-        OzoneConsts.GB * random.nextInt(5),
-        random.nextLong(1000),
-        OzoneConsts.GB * random.nextInt(2),
-        random.nextLong(1000),
-        OzoneConsts.GB * random.nextInt(5));
-  }
-
-  /**
-   * Creates ContainerInfo with the given details.
-   *
-   * @param containerId id of the container
-   * @param size        size of container
-   * @param keyCount    number of keys
-   * @param bytesUsed   bytes used by the container
-   * @param readCount   number of reads
-   * @param readBytes   bytes read
-   * @param writeCount  number of writes
-   * @param writeBytes  bytes written
-   *
-   * @return ContainerInfo
-   */
-  public static ContainerInfo createContainerInfo(
-      long containerId, long size, long keyCount, long bytesUsed,
-      long readCount, long readBytes, long writeCount, long writeBytes) {
-    return ContainerInfo.newBuilder()
-        .setContainerID(containerId)
-        .setSize(size)
-        .setKeyCount(keyCount)
-        .setUsed(bytesUsed)
-        .setReadCount(readCount)
-        .setReadBytes(readBytes)
-        .setWriteCount(writeCount)
-        .setWriteBytes(writeBytes)
-        .build();
-  }
-
-  /**
-   * Create Command Status report object.
-   * @return CommandStatusReportsProto
-   */
-  public static CommandStatusReportsProto createCommandStatusReport(
-      List<CommandStatus> reports) {
-    CommandStatusReportsProto.Builder report = CommandStatusReportsProto
-        .newBuilder();
-    report.addAllCmdStatus(reports);
-    return report.build();
-  }
-
-  public static
-      org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo
-      allocateContainer(ContainerStateManager containerStateManager)
-      throws IOException {
-
-    PipelineSelector pipelineSelector = Mockito.mock(PipelineSelector.class);
-
-    Pipeline pipeline = new Pipeline("leader", HddsProtos.LifeCycleState.CLOSED,
-        HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.THREE,
-        PipelineID.randomId());
-
-    when(pipelineSelector
-        .getReplicationPipeline(HddsProtos.ReplicationType.STAND_ALONE,
-            HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline);
-
-    return containerStateManager
-        .allocateContainer(pipelineSelector,
-            HddsProtos.ReplicationType.STAND_ALONE,
-            HddsProtos.ReplicationFactor.THREE, "root").getContainerInfo();
-
-  }
-
-  public static void closeContainer(ContainerStateManager containerStateManager,
-      org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo
-          container)
-      throws SCMException {
-
-    containerStateManager.getContainerStateMap()
-        .updateState(container, container.getState(), LifeCycleState.CLOSING);
-
-    containerStateManager.getContainerStateMap()
-        .updateState(container, container.getState(), LifeCycleState.CLOSED);
-
-  }
-}

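The deleted TestUtils helpers above are thin wrappers over the generated
protobuf builders. As a minimal sketch of what they did (builder and setter
names are taken verbatim from the diff above; the imports from the deleted
file are assumed, the storage path is hypothetical, and addStorageReport is
the generated single-element counterpart of the addAllStorageReport call
used in createNodeReport), a single-disk node report can be built directly:

    UUID nodeId = UUID.randomUUID();
    StorageReportProto storage = StorageReportProto.newBuilder()
        .setStorageUuid(nodeId.toString())
        .setStorageLocation("/data/disk0")   // hypothetical storage dir
        .setCapacity(100 * OzoneConsts.GB)   // capacity in bytes
        .setScmUsed(10 * OzoneConsts.GB)     // space used
        .setRemaining(90 * OzoneConsts.GB)   // space remaining
        .setStorageType(StorageTypeProto.DISK)
        .build();
    NodeReportProto nodeReport = NodeReportProto.newBuilder()
        .addStorageReport(storage)
        .build();
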
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
deleted file mode 100644
index e70e444..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.block;
-
-import java.util.UUID;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMStorage;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.common.Storage.StorageState;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Paths;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConsts.GB;
-import static org.apache.hadoop.ozone.OzoneConsts.MB;
-
-
-/**
- * Tests for SCM Block Manager.
- */
-public class TestBlockManager implements EventHandler<Boolean> {
-  private static ContainerMapping mapping;
-  private static MockNodeManager nodeManager;
-  private static BlockManagerImpl blockManager;
-  private static File testDir;
-  private final static long DEFAULT_BLOCK_SIZE = 128 * MB;
-  private static HddsProtos.ReplicationFactor factor;
-  private static HddsProtos.ReplicationType type;
-  private static String containerOwner = "OZONE";
-  private static EventQueue eventQueue;
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @Before
-  public void setUp() throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
-
-    String path = GenericTestUtils
-        .getTempPath(TestBlockManager.class.getSimpleName());
-    testDir = Paths.get(path).toFile();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, path);
-    eventQueue = new EventQueue();
-    boolean folderExisted = testDir.exists() || testDir.mkdirs();
-    if (!folderExisted) {
-      throw new IOException("Unable to create test directory path");
-    }
-    nodeManager = new MockNodeManager(true, 10);
-    mapping = new ContainerMapping(conf, nodeManager, 128, eventQueue);
-    blockManager = new BlockManagerImpl(conf,
-        nodeManager, mapping, eventQueue);
-    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
-    eventQueue.addHandler(SCMEvents.START_REPLICATION, this);
-    if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){
-      factor = HddsProtos.ReplicationFactor.THREE;
-      type = HddsProtos.ReplicationType.RATIS;
-    } else {
-      factor = HddsProtos.ReplicationFactor.ONE;
-      type = HddsProtos.ReplicationType.STAND_ALONE;
-    }
-  }
-
-  @After
-  public void cleanup() throws IOException {
-    blockManager.close();
-    mapping.close();
-    FileUtil.fullyDelete(testDir);
-  }
-
-  private static StorageContainerManager getScm(OzoneConfiguration conf)
-      throws IOException {
-    conf.setBoolean(OZONE_ENABLED, true);
-    SCMStorage scmStore = new SCMStorage(conf);
-    if(scmStore.getState() != StorageState.INITIALIZED) {
-      String clusterId = UUID.randomUUID().toString();
-      String scmId = UUID.randomUUID().toString();
-      scmStore.setClusterId(clusterId);
-      scmStore.setScmId(scmId);
-      // writes the version file properties
-      scmStore.initialize();
-    }
-    return StorageContainerManager.createSCM(null, conf);
-  }
-
-  @Test
-  public void testAllocateBlock() throws Exception {
-    eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
-    GenericTestUtils.waitFor(() -> {
-      return !blockManager.isScmInChillMode();
-    }, 10, 1000 * 5);
-    AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner);
-    Assert.assertNotNull(block);
-  }
-
-  @Test
-  public void testAllocateOversizedBlock() throws Exception {
-    eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
-    GenericTestUtils.waitFor(() -> {
-      return !blockManager.isScmInChillMode();
-    }, 10, 1000 * 5);
-    long size = 6 * GB;
-    thrown.expectMessage("Unsupported block size");
-    AllocatedBlock block = blockManager.allocateBlock(size,
-        type, factor, containerOwner);
-  }
-
-
-  @Test
-  public void testAllocateBlockFailureInChillMode() throws Exception {
-    eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, true);
-    GenericTestUtils.waitFor(() -> {
-      return blockManager.isScmInChillMode();
-    }, 10, 1000 * 5);
-    // Test1: In chill mode expect an SCMException.
-    thrown.expectMessage("ChillModePrecheck failed for "
-        + "allocateBlock");
-    blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner);
-  }
-
-  @Test
-  public void testAllocateBlockSucInChillMode() throws Exception {
-    // Test2: Exit chill mode and then try allocateBlock again.
-    eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
-    GenericTestUtils.waitFor(() -> {
-      return !blockManager.isScmInChillMode();
-    }, 10, 1000 * 5);
-    Assert.assertNotNull(blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
-        type, factor, containerOwner));
-  }
-
-  @Override
-  public void onMessage(Boolean aBoolean, EventPublisher publisher) {
-    System.out.println("test");
-  }
-}

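Note that TestBlockManager never toggles chill mode on the block manager
directly: it publishes CHILL_MODE_STATUS on the EventQueue and then polls,
because event delivery is asynchronous. A condensed sketch of that wiring,
restricted to calls that appear in the diff above (the blockManager instance
from setUp() is assumed):

    EventQueue eventQueue = new EventQueue();
    // BlockManagerImpl subscribes as an EventHandler<Boolean>.
    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
    // Publish "not in chill mode" and wait for the handler to apply it.
    eventQueue.fireEvent(SCMEvents.CHILL_MODE_STATUS, false);
    GenericTestUtils.waitFor(() -> !blockManager.isScmInChillMode(),
        10, 5 * 1000);
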
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
deleted file mode 100644
index 9f0e336..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ /dev/null
@@ -1,403 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.block;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.Mapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
-    .DeleteBlockTransactionResult;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.UUID;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Mockito.when;
-
-/**
- * Tests for DeletedBlockLog.
- */
-public class TestDeletedBlockLog {
-
-  private static DeletedBlockLogImpl deletedBlockLog;
-  private OzoneConfiguration conf;
-  private File testDir;
-  private Mapping containerManager;
-  private List<DatanodeDetails> dnList;
-
-  @Before
-  public void setup() throws Exception {
-    testDir = GenericTestUtils.getTestDir(
-        TestDeletedBlockLog.class.getSimpleName());
-    conf = new OzoneConfiguration();
-    conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
-    conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    containerManager = Mockito.mock(ContainerMapping.class);
-    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
-    dnList = new ArrayList<>(3);
-    setupContainerManager();
-  }
-
-  private void setupContainerManager() throws IOException {
-    dnList.add(
-        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
-            .build());
-    dnList.add(
-        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
-            .build());
-    dnList.add(
-        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
-            .build());
-
-    ContainerInfo containerInfo =
-        new ContainerInfo.Builder().setContainerID(1).build();
-    Pipeline pipeline =
-        new Pipeline(null, LifeCycleState.CLOSED,
-            ReplicationType.RATIS, ReplicationFactor.THREE, null);
-    pipeline.addMember(dnList.get(0));
-    pipeline.addMember(dnList.get(1));
-    pipeline.addMember(dnList.get(2));
-    ContainerWithPipeline containerWithPipeline =
-        new ContainerWithPipeline(containerInfo, pipeline);
-    when(containerManager.getContainerWithPipeline(anyLong()))
-        .thenReturn(containerWithPipeline);
-    when(containerManager.getContainer(anyLong())).thenReturn(containerInfo);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    deletedBlockLog.close();
-    FileUtils.deleteDirectory(testDir);
-  }
-
-  private Map<Long, List<Long>> generateData(int dataSize) {
-    Map<Long, List<Long>> blockMap = new HashMap<>();
-    Random random = new Random(1);
-    int containerIDBase = random.nextInt(100);
-    int localIDBase = random.nextInt(1000);
-    for (int i = 0; i < dataSize; i++) {
-      long containerID = containerIDBase + i;
-      List<Long> blocks = new ArrayList<>();
-      int blockSize = random.nextInt(30) + 1;
-      for (int j = 0; j < blockSize; j++)  {
-        long localID = localIDBase + j;
-        blocks.add(localID);
-      }
-      blockMap.put(containerID, blocks);
-    }
-    return blockMap;
-  }
-
-  private void commitTransactions(
-      List<DeleteBlockTransactionResult> transactionResults,
-      DatanodeDetails... dns) {
-    for (DatanodeDetails dnDetails : dns) {
-      deletedBlockLog
-          .commitTransactions(transactionResults, dnDetails.getUuid());
-    }
-  }
-
-  private void commitTransactions(
-      List<DeleteBlockTransactionResult> transactionResults) {
-    commitTransactions(transactionResults,
-        dnList.toArray(new DatanodeDetails[3]));
-  }
-
-  private void commitTransactions(
-      Collection<DeletedBlocksTransaction> deletedBlocksTransactions,
-      DatanodeDetails... dns) {
-    commitTransactions(deletedBlocksTransactions.stream()
-        .map(this::createDeleteBlockTransactionResult)
-        .collect(Collectors.toList()), dns);
-  }
-
-  private void commitTransactions(
-      Collection<DeletedBlocksTransaction> deletedBlocksTransactions) {
-    commitTransactions(deletedBlocksTransactions.stream()
-        .map(this::createDeleteBlockTransactionResult)
-        .collect(Collectors.toList()));
-  }
-
-  private DeleteBlockTransactionResult createDeleteBlockTransactionResult(
-      DeletedBlocksTransaction transaction) {
-    return DeleteBlockTransactionResult.newBuilder()
-        .setContainerID(transaction.getContainerID()).setSuccess(true)
-        .setTxID(transaction.getTxID()).build();
-  }
-
-  private List<DeletedBlocksTransaction> getTransactions(
-      int maximumAllowedTXNum) throws IOException {
-    DatanodeDeletedBlockTransactions transactions =
-        new DatanodeDeletedBlockTransactions(containerManager,
-            maximumAllowedTXNum, 3);
-    deletedBlockLog.getTransactions(transactions);
-    return transactions.getDatanodeTransactions(dnList.get(0).getUuid());
-  }
-
-  @Test
-  public void testIncrementCount() throws Exception {
-    int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
-
-    // Create 30 TXs in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(30).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
-
-    // This will return all TXs, total num 30.
-    List<DeletedBlocksTransaction> blocks =
-        getTransactions(40);
-    List<Long> txIDs = blocks.stream().map(DeletedBlocksTransaction::getTxID)
-        .collect(Collectors.toList());
-
-    for (int i = 0; i < maxRetry; i++) {
-      deletedBlockLog.incrementCount(txIDs);
-    }
-
-    // Increment another time so it exceeds maxRetry.
-    // On this call, count will be set to -1 which means TX eventually fails.
-    deletedBlockLog.incrementCount(txIDs);
-    blocks = getTransactions(40);
-    for (DeletedBlocksTransaction block : blocks) {
-      Assert.assertEquals(-1, block.getCount());
-    }
-
-    // Once all TXs have failed, the getTransactions call will always return nothing.
-    blocks = getTransactions(40);
-    Assert.assertEquals(0, blocks.size());
-  }
-
-  @Test
-  public void testCommitTransactions() throws Exception {
-    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
-    List<DeletedBlocksTransaction> blocks =
-        getTransactions(20);
-    // Add an invalid txn.
-    blocks.add(
-        DeletedBlocksTransaction.newBuilder().setContainerID(1).setTxID(70)
-            .setCount(0).addLocalID(0).build());
-    commitTransactions(blocks);
-    blocks.remove(blocks.size() - 1);
-
-    blocks = getTransactions(50);
-    Assert.assertEquals(30, blocks.size());
-    commitTransactions(blocks, dnList.get(1), dnList.get(2),
-        DatanodeDetails.newBuilder().setUuid(UUID.randomUUID().toString())
-            .build());
-
-    blocks = getTransactions(50);
-    Assert.assertEquals(30, blocks.size());
-    commitTransactions(blocks, dnList.get(0));
-
-    blocks = getTransactions(50);
-    Assert.assertEquals(0, blocks.size());
-  }
-
-  @Test
-  public void testRandomOperateTransactions() throws Exception {
-    Random random = new Random();
-    int added = 0, committed = 0;
-    List<DeletedBlocksTransaction> blocks = new ArrayList<>();
-    List<Long> txIDs = new ArrayList<>();
-    byte[] latestTxid = DFSUtil.string2Bytes("#LATEST_TXID#");
-    MetadataKeyFilters.MetadataKeyFilter avoidLatestTxid =
-        (preKey, currentKey, nextKey) ->
-            !Arrays.equals(latestTxid, currentKey);
-    MetadataStore store = deletedBlockLog.getDeletedStore();
-    // Randomly add/get/commit/increase transactions.
-    for (int i = 0; i < 100; i++) {
-      int state = random.nextInt(4);
-      if (state == 0) {
-        for (Map.Entry<Long, List<Long>> entry :
-            generateData(10).entrySet()){
-          deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-        }
-        added += 10;
-      } else if (state == 1) {
-        blocks = getTransactions(20);
-        txIDs = new ArrayList<>();
-        for (DeletedBlocksTransaction block : blocks) {
-          txIDs.add(block.getTxID());
-        }
-        deletedBlockLog.incrementCount(txIDs);
-      } else if (state == 2) {
-        commitTransactions(blocks);
-        committed += blocks.size();
-        blocks = new ArrayList<>();
-      } else {
-        // verify the counts of added and committed transactions.
-        List<Map.Entry<byte[], byte[]>> result =
-            store.getRangeKVs(null, added, avoidLatestTxid);
-        Assert.assertEquals(added, result.size() + committed);
-      }
-    }
-    blocks = getTransactions(1000);
-    commitTransactions(blocks);
-  }
-
-  @Test
-  public void testPersistence() throws Exception {
-    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
-      deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
-    }
-    // close the db and reopen it to make sure
-    // transactions are stored persistently.
-    deletedBlockLog.close();
-    deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager);
-    List<DeletedBlocksTransaction> blocks =
-        getTransactions(10);
-    commitTransactions(blocks);
-    blocks = getTransactions(100);
-    Assert.assertEquals(40, blocks.size());
-    commitTransactions(blocks);
-  }
-
-  @Test
-  public void testDeletedBlockTransactions() throws IOException {
-    int txNum = 10;
-    int maximumAllowedTXNum = 5;
-    List<DeletedBlocksTransaction> blocks = null;
-    List<Long> containerIDs = new LinkedList<>();
-    DatanodeDetails dnId1 = dnList.get(0), dnId2 = dnList.get(1);
-
-    int count = 0;
-    long containerID = 0L;
-
-    // Create {txNum} TXs in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(txNum)
-        .entrySet()) {
-      count++;
-      containerID = entry.getKey();
-      containerIDs.add(containerID);
-      deletedBlockLog.addTransaction(containerID, entry.getValue());
-
-      // make TX[1-6] for datanode1; TX[7-10] for datanode2
-      if (count <= (maximumAllowedTXNum + 1)) {
-        mockContainerInfo(containerID, dnId1);
-      } else {
-        mockContainerInfo(containerID, dnId2);
-      }
-    }
-
-    DatanodeDeletedBlockTransactions transactions =
-        new DatanodeDeletedBlockTransactions(containerManager,
-            maximumAllowedTXNum, 2);
-    deletedBlockLog.getTransactions(transactions);
-
-    for (UUID id : transactions.getDatanodeIDs()) {
-      List<DeletedBlocksTransaction> txs = transactions
-          .getDatanodeTransactions(id);
-      // delete TX ID
-      commitTransactions(txs);
-    }
-
-    blocks = getTransactions(txNum);
-    // There should be one block remaining since dnId1 reached
-    // the maximum value (5).
-    Assert.assertEquals(1, blocks.size());
-
-    Assert.assertFalse(transactions.isFull());
-    // The number of TXs for dnId1 won't exceed the maximum value.
-    Assert.assertEquals(maximumAllowedTXNum,
-        transactions.getDatanodeTransactions(dnId1.getUuid()).size());
-
-    int size = transactions.getDatanodeTransactions(dnId2.getUuid()).size();
-    // Add a duplicated container for dnId2; this should fail.
-    DeletedBlocksTransaction.Builder builder =
-        DeletedBlocksTransaction.newBuilder();
-    builder.setTxID(11);
-    builder.setContainerID(containerID);
-    builder.setCount(0);
-    transactions.addTransaction(builder.build(),
-        null);
-
-    // The number of TXs for dnId2 should not change.
-    Assert.assertEquals(size,
-        transactions.getDatanodeTransactions(dnId2.getUuid()).size());
-
-    // Add a new TX for dnId2; dnId2 will then reach the maximum value.
-    containerID = RandomUtils.nextLong();
-    builder = DeletedBlocksTransaction.newBuilder();
-    builder.setTxID(12);
-    builder.setContainerID(containerID);
-    builder.setCount(0);
-    mockContainerInfo(containerID, dnId2);
-    transactions.addTransaction(builder.build(),
-        null);
-    // Since all nodes are full, transactions is now full.
-    Assert.assertTrue(transactions.isFull());
-  }
-
-  private void mockContainerInfo(long containerID, DatanodeDetails dd)
-      throws IOException {
-    Pipeline pipeline =
-        new Pipeline("fake", LifeCycleState.OPEN,
-            ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
-            PipelineID.randomId());
-    pipeline.addMember(dd);
-
-    ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    builder.setPipelineID(pipeline.getId())
-        .setReplicationType(pipeline.getType())
-        .setReplicationFactor(pipeline.getFactor());
-
-    ContainerInfo containerInfo = builder.build();
-    ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(
-        containerInfo, pipeline);
-    Mockito.doReturn(containerInfo).when(containerManager)
-        .getContainer(containerID);
-    Mockito.doReturn(containerWithPipeline).when(containerManager)
-        .getContainerWithPipeline(containerID);
-  }
-}

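The tests above all exercise the same add -> batch -> ack cycle on the
delete-block log. A compressed sketch of one round trip, using only calls
that appear in the diff (the containerManager mock, dnList, and the
container setup from setup() are assumed; the (5, 3) constructor arguments
mirror maximumAllowedTXNum and the datanode count as the tests use them):

    // 1. Record a deletion TX: container ID -> local block IDs.
    deletedBlockLog.addTransaction(1L, Arrays.asList(10L, 11L, 12L));

    // 2. Pull pending TXs into per-datanode batches.
    DatanodeDeletedBlockTransactions batch =
        new DatanodeDeletedBlockTransactions(containerManager, 5, 3);
    deletedBlockLog.getTransactions(batch);

    // 3. Ack one datanode's batch; a TX leaves the log only after
    //    every replica datanode has committed it.
    UUID dnUuid = dnList.get(0).getUuid();
    List<DeleteBlockTransactionResult> results =
        batch.getDatanodeTransactions(dnUuid).stream()
            .map(tx -> DeleteBlockTransactionResult.newBuilder()
                .setTxID(tx.getTxID())
                .setContainerID(tx.getContainerID())
                .setSuccess(true).build())
            .collect(Collectors.toList());
    deletedBlockLog.commitTransactions(results, dnUuid);
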
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
deleted file mode 100644
index a67df69..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/package-info.java
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/**
- * Make checkstyle happy.
- * */
-package org.apache.hadoop.hdds.scm.block;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
deleted file mode 100644
index afa25e2..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.command;
-
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .CommandStatusReportFromDatanode;
-
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-public class TestCommandStatusReportHandler implements EventPublisher {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(TestCommandStatusReportHandler.class);
-  private CommandStatusReportHandler cmdStatusReportHandler;
-  private String storagePath = GenericTestUtils.getRandomizedTempPath()
-      .concat("/" + UUID.randomUUID().toString());
-
-  @Before
-  public void setup() {
-    cmdStatusReportHandler = new CommandStatusReportHandler();
-  }
-
-  @Test
-  public void testCommandStatusReport() {
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(LOG);
-
-    CommandStatusReportFromDatanode report = this.getStatusReport(Collections
-        .emptyList());
-    cmdStatusReportHandler.onMessage(report, this);
-    assertFalse(logCapturer.getOutput().contains("Delete_Block_Status"));
-    assertFalse(logCapturer.getOutput().contains(
-        "Close_Container_Command_Status"));
-    assertFalse(logCapturer.getOutput().contains("Replicate_Command_Status"));
-
-
-    report = this.getStatusReport(this.getCommandStatusList());
-    cmdStatusReportHandler.onMessage(report, this);
-    assertTrue(logCapturer.getOutput().contains("firing event of type " +
-        "Delete_Block_Status"));
-    assertTrue(logCapturer.getOutput().contains("firing event of type " +
-        "Close_Container_Command_Status"));
-    assertTrue(logCapturer.getOutput().contains("firing event of type " +
-        "Replicate_Command_Status"));
-
-    assertTrue(logCapturer.getOutput().contains("type: " +
-        "closeContainerCommand"));
-    assertTrue(logCapturer.getOutput().contains("type: " +
-        "deleteBlocksCommand"));
-    assertTrue(logCapturer.getOutput().contains("type: " +
-        "replicateContainerCommand"));
-
-  }
-
-  private CommandStatusReportFromDatanode getStatusReport(
-      List<CommandStatus> reports) {
-    CommandStatusReportsProto report = TestUtils.createCommandStatusReport(
-        reports);
-    DatanodeDetails dn = TestUtils.randomDatanodeDetails();
-    return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode(
-        dn, report);
-  }
-
-  @Override
-  public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent
-      (EVENT_TYPE event, PAYLOAD payload) {
-    LOG.info("firing event of type {}, payload {}", event.getName(), payload
-        .toString());
-  }
-
-  private List<CommandStatus> getCommandStatusList() {
-    List<CommandStatus> reports = new ArrayList<>(3);
-
-    // Add status message for replication, close container and delete block
-    // command.
-    CommandStatus.Builder builder = CommandStatus.newBuilder();
-
-    builder.setCmdId(HddsIdFactory.getLongId())
-        .setStatus(CommandStatus.Status.EXECUTED)
-        .setType(Type.deleteBlocksCommand);
-    reports.add(builder.build());
-
-    builder.setCmdId(HddsIdFactory.getLongId())
-        .setStatus(CommandStatus.Status.EXECUTED)
-        .setType(Type.closeContainerCommand);
-    reports.add(builder.build());
-
-    builder.setMsg("Not enough space")
-        .setCmdId(HddsIdFactory.getLongId())
-        .setStatus(CommandStatus.Status.FAILED)
-        .setType(Type.replicateContainerCommand);
-    reports.add(builder.build());
-    return reports;
-  }
-
-}

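For reference, the report object this handler consumes is assembled from
plain protobuf builders, exactly as getCommandStatusList does above. A
minimal sketch (addCmdStatus is the generated single-element counterpart of
the addAllCmdStatus call used in TestUtils.createCommandStatusReport; the
publisher argument stands in for the EventPublisher, which in the deleted
test is the test class itself):

    CommandStatus status = CommandStatus.newBuilder()
        .setCmdId(HddsIdFactory.getLongId())
        .setStatus(CommandStatus.Status.EXECUTED)
        .setType(Type.deleteBlocksCommand)
        .build();
    CommandStatusReportsProto report = CommandStatusReportsProto.newBuilder()
        .addCmdStatus(status)
        .build();
    // Pair it with the reporting datanode before handing it to onMessage().
    CommandStatusReportFromDatanode event =
        new CommandStatusReportFromDatanode(
            TestUtils.randomDatanodeDetails(), report);
    cmdStatusReportHandler.onMessage(event, publisher);
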
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
deleted file mode 100644
index f529c20..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Make CheckStyle Happy.
- */
-package org.apache.hadoop.hdds.scm.command;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
deleted file mode 100644
index 3221053..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ /dev/null
@@ -1,592 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
-import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap;
-import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
-import org.apache.hadoop.hdds.scm.node.states.ReportResult;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.protocol.VersionResponse;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.assertj.core.util.Preconditions;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-
-/**
- * Test Helper for testing container Mapping.
- */
-public class MockNodeManager implements NodeManager {
-  private final static NodeData[] NODES = {
-      new NodeData(10L * OzoneConsts.TB, OzoneConsts.GB),
-      new NodeData(64L * OzoneConsts.TB, 100 * OzoneConsts.GB),
-      new NodeData(128L * OzoneConsts.TB, 256 * OzoneConsts.GB),
-      new NodeData(40L * OzoneConsts.TB, OzoneConsts.TB),
-      new NodeData(256L * OzoneConsts.TB, 200 * OzoneConsts.TB),
-      new NodeData(20L * OzoneConsts.TB, 10 * OzoneConsts.GB),
-      new NodeData(32L * OzoneConsts.TB, 16 * OzoneConsts.TB),
-      new NodeData(OzoneConsts.TB, 900 * OzoneConsts.GB),
-      new NodeData(OzoneConsts.TB, 900 * OzoneConsts.GB, NodeData.STALE),
-      new NodeData(OzoneConsts.TB, 200L * OzoneConsts.GB, NodeData.STALE),
-      new NodeData(OzoneConsts.TB, 200L * OzoneConsts.GB, NodeData.DEAD)
-  };
-  private final List<DatanodeDetails> healthyNodes;
-  private final List<DatanodeDetails> staleNodes;
-  private final List<DatanodeDetails> deadNodes;
-  private final Map<UUID, SCMNodeStat> nodeMetricMap;
-  private final SCMNodeStat aggregateStat;
-  private boolean chillmode;
-  private final Map<UUID, List<SCMCommand>> commandMap;
-  private final Node2PipelineMap node2PipelineMap;
-  private final Node2ContainerMap node2ContainerMap;
-
-  public MockNodeManager(boolean initializeFakeNodes, int nodeCount) {
-    this.healthyNodes = new LinkedList<>();
-    this.staleNodes = new LinkedList<>();
-    this.deadNodes = new LinkedList<>();
-    this.nodeMetricMap = new HashMap<>();
-    this.node2PipelineMap = new Node2PipelineMap();
-    this.node2ContainerMap = new Node2ContainerMap();
-    aggregateStat = new SCMNodeStat();
-    if (initializeFakeNodes) {
-      for (int x = 0; x < nodeCount; x++) {
-        DatanodeDetails dd = TestUtils.randomDatanodeDetails();
-        populateNodeMetric(dd, x);
-      }
-    }
-    chillmode = false;
-    this.commandMap = new HashMap<>();
-  }
-
-  /**
-   * Invoked from ctor to create some node Metrics.
-   *
-   * @param datanodeDetails - Datanode details
-   */
-  private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) {
-    SCMNodeStat newStat = new SCMNodeStat();
-    long remaining =
-        NODES[x % NODES.length].capacity - NODES[x % NODES.length].used;
-    newStat.set(
-        (NODES[x % NODES.length].capacity),
-        (NODES[x % NODES.length].used), remaining);
-    this.nodeMetricMap.put(datanodeDetails.getUuid(), newStat);
-    aggregateStat.add(newStat);
-
-    if (NODES[x % NODES.length].getCurrentState() == NodeData.HEALTHY) {
-      healthyNodes.add(datanodeDetails);
-    }
-
-    if (NODES[x % NODES.length].getCurrentState() == NodeData.STALE) {
-      staleNodes.add(datanodeDetails);
-    }
-
-    if (NODES[x % NODES.length].getCurrentState() == NodeData.DEAD) {
-      deadNodes.add(datanodeDetails);
-    }
-
-  }
-
-  /**
-   * Sets the chill mode value.
-   * @param chillmode boolean
-   */
-  public void setChillmode(boolean chillmode) {
-    this.chillmode = chillmode;
-  }
-
-  /**
-   * Removes a data node from the management of this Node Manager.
-   *
-   * @param node - DataNode.
-   * @throws NodeNotFoundException
-   */
-  @Override
-  public void removeNode(DatanodeDetails node)
-      throws NodeNotFoundException {
-
-  }
-
-  /**
-   * Gets all Live Datanodes that are currently communicating with SCM.
-   *
-   * @param nodestate - State of the node
-   * @return List of Datanodes that are Heartbeating SCM.
-   */
-  @Override
-  public List<DatanodeDetails> getNodes(HddsProtos.NodeState nodestate) {
-    if (nodestate == HEALTHY) {
-      return healthyNodes;
-    }
-
-    if (nodestate == STALE) {
-      return staleNodes;
-    }
-
-    if (nodestate == DEAD) {
-      return deadNodes;
-    }
-
-    return null;
-  }
-
-  /**
-   * Returns the Number of Datanodes that are communicating with SCM.
-   *
-   * @param nodestate - State of the node
-   * @return int -- count
-   */
-  @Override
-  public int getNodeCount(HddsProtos.NodeState nodestate) {
-    List<DatanodeDetails> nodes = getNodes(nodestate);
-    if (nodes != null) {
-      return nodes.size();
-    }
-    return 0;
-  }
-
-  /**
-   * Get all datanodes known to SCM.
-   *
-   * @return List of DatanodeDetails known to SCM.
-   */
-  @Override
-  public List<DatanodeDetails> getAllNodes() {
-    return null;
-  }
-
-  /**
-   * Get the minimum number of nodes to get out of chill mode.
-   *
-   * @return int
-   */
-  @Override
-  public int getMinimumChillModeNodes() {
-    return 0;
-  }
-
-  /**
-   * Chill mode is the period when node manager waits for a minimum configured
-   * number of datanodes to report in. This is called chill mode to indicate the
-   * period before node manager gets into action.
-   * <p>
-   * Forcefully exits the chill mode, even if we have not met the minimum
-   * criteria of the nodes reporting in.
-   */
-  @Override
-  public void forceExitChillMode() {
-
-  }
-
-  /**
-   * Puts the node manager into manual chill mode.
-   */
-  @Override
-  public void enterChillMode() {
-
-  }
-
-  /**
-   * Brings node manager out of manual chill mode.
-   */
-  @Override
-  public void exitChillMode() {
-
-  }
-
-  /**
-   * Returns true if node manager is out of chill mode, else false.
-   * @return true if out of chill mode, else false
-   */
-  @Override
-  public boolean isOutOfChillMode() {
-    return !chillmode;
-  }
-
-  /**
-   * Returns a chill mode status string.
-   *
-   * @return String
-   */
-  @Override
-  public String getChillModeStatus() {
-    return null;
-  }
-
-  /**
-   * Returns the aggregated node stats.
-   * @return the aggregated node stats.
-   */
-  @Override
-  public SCMNodeStat getStats() {
-    return aggregateStat;
-  }
-
-  /**
-   * Return a map of nodes to their stats.
-   * @return a map of individual node stats (live/stale but not dead).
-   */
-  @Override
-  public Map<UUID, SCMNodeStat> getNodeStats() {
-    return nodeMetricMap;
-  }
-
-  /**
-   * Return the node stat of the specified datanode.
-   * @param datanodeDetails - datanode details.
-   * @return node stat if it is live/stale, null if it is decommissioned or
-   * doesn't exist.
-   */
-  @Override
-  public SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails) {
-    SCMNodeStat stat = nodeMetricMap.get(datanodeDetails.getUuid());
-    if (stat == null) {
-      return null;
-    }
-    return new SCMNodeMetric(stat);
-  }
-
-  /**
-   * Returns the node state of a specific node.
-   *
-   * @param dd - DatanodeDetails
-   * @return Healthy/Stale/Dead.
-   */
-  @Override
-  public HddsProtos.NodeState getNodeState(DatanodeDetails dd) {
-    return null;
-  }
-
-  /**
-   * Get set of pipelines a datanode is part of.
-   * @param dnId - datanodeID
-   * @return Set of PipelineID
-   */
-  @Override
-  public Set<PipelineID> getPipelineByDnID(UUID dnId) {
-    return node2PipelineMap.getPipelines(dnId);
-  }
-
-  /**
-   * Add pipeline information in the NodeManager.
-   * @param pipeline - Pipeline to be added
-   */
-  @Override
-  public void addPipeline(Pipeline pipeline) {
-    node2PipelineMap.addPipeline(pipeline);
-  }
-
-  /**
-   * Remove a pipeline information from the NodeManager.
-   * @param pipeline - Pipeline to be removed
-   */
-  @Override
-  public void removePipeline(Pipeline pipeline) {
-    node2PipelineMap.removePipeline(pipeline);
-  }
-
-  @Override
-  public void addDatanodeCommand(UUID dnId, SCMCommand command) {
-    if (commandMap.containsKey(dnId)) {
-      List<SCMCommand> commandList = commandMap.get(dnId);
-      Preconditions.checkNotNull(commandList);
-      commandList.add(command);
-    } else {
-      List<SCMCommand> commandList = new LinkedList<>();
-      commandList.add(command);
-      commandMap.put(dnId, commandList);
-    }
-  }
-
-  /**
-   * Empty implementation for processNodeReport.
-   *
-   * @param dnUuid - UUID of the datanode
-   * @param nodeReport - node report from that datanode
-   */
-  @Override
-  public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) {
-    // do nothing
-  }
-
-  /**
-   * Update set of containers available on a datanode.
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @throws SCMException - if datanode is not known. For new datanode use
-   *                        addDatanodeInContainerMap call.
-   */
-  @Override
-  public void setContainersForDatanode(UUID uuid, Set<ContainerID> containerIds)
-      throws SCMException {
-    node2ContainerMap.setContainersForDatanode(uuid, containerIds);
-  }
-
-  /**
-   * Process containerReport received from datanode.
-   * @param uuid - DatanodeID
-   * @param containerIds - Set of containerIDs
-   * @return The result after processing containerReport
-   */
-  @Override
-  public ReportResult<ContainerID> processContainerReport(UUID uuid,
-      Set<ContainerID> containerIds) {
-    return node2ContainerMap.processReport(uuid, containerIds);
-  }
-
-  /**
-   * Return set of containerIDs available on a datanode.
-   * @param uuid - DatanodeID
-   * @return - set of containerIDs
-   */
-  @Override
-  public Set<ContainerID> getContainers(UUID uuid) {
-    return node2ContainerMap.getContainers(uuid);
-  }
-
-  /**
-   * Insert a new datanode with set of containerIDs for containers available
-   * on it.
-   * @param uuid - DatanodeID
-   * @param containerIDs - Set of ContainerIDs
-   * @throws SCMException - if datanode already exists
-   */
-  @Override
-  public void addDatanodeInContainerMap(UUID uuid,
-      Set<ContainerID> containerIDs) throws SCMException {
-    node2ContainerMap.insertNewDatanode(uuid, containerIDs);
-  }
-
-  // Returns the number of commands that are queued to this node manager.
-  public int getCommandCount(DatanodeDetails dd) {
-    List<SCMCommand> list = commandMap.get(dd.getUuid());
-    return (list == null) ? 0 : list.size();
-  }
-
-  public void clearCommandQueue(UUID dnId) {
-    if (commandMap.containsKey(dnId)) {
-      commandMap.put(dnId, new LinkedList<>());
-    }
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated with it.
-   * If the stream is already closed then invoking this method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the close may
-   * fail require careful attention. It is strongly advised to relinquish the
-   * underlying resources and to internally <em>mark</em> the {@code Closeable}
-   * as closed, prior to throwing the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  /**
-   * Gets the version info from SCM.
-   *
-   * @param versionRequest - version Request.
-   * @return - returns SCM version info and other required information needed
-   * by datanode.
-   */
-  @Override
-  public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
-    return null;
-  }
-
-  /**
-   * Register the node if the node finds that it is not registered with any
-   * SCM.
-   *
-   * @param datanodeDetails DatanodeDetails
-   * @param nodeReport NodeReportProto
-   * @return RegisteredCommand
-   */
-  @Override
-  public RegisteredCommand register(DatanodeDetails datanodeDetails,
-      NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) {
-    return null;
-  }
-
-  /**
-   * Send heartbeat to indicate the datanode is alive and doing well.
-   *
-   * @param datanodeDetails - Datanode details.
-   * @return list of SCMCommands to be sent back to the datanode
-   */
-  @Override
-  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) {
-    return null;
-  }
-
-  @Override
-  public Map<String, Integer> getNodeCount() {
-    Map<String, Integer> nodeCountMap = new HashMap<>();
-    for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) {
-      nodeCountMap.put(state.toString(), getNodeCount(state));
-    }
-    return nodeCountMap;
-  }
-
-  /**
-   * Makes it easy to add a container.
-   *
-   * @param datanodeDetails datanode details
-   * @param size number of bytes.
-   */
-  public void addContainer(DatanodeDetails datanodeDetails, long size) {
-    SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails.getUuid());
-    if (stat != null) {
-      aggregateStat.subtract(stat);
-      stat.getCapacity().add(size);
-      aggregateStat.add(stat);
-      nodeMetricMap.put(datanodeDetails.getUuid(), stat);
-    }
-  }
-
-  /**
-   * Makes it easy to simulate a delete of a container.
-   *
-   * @param datanodeDetails datanode Details
-   * @param size number of bytes.
-   */
-  public void delContainer(DatanodeDetails datanodeDetails, long size) {
-    SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails.getUuid());
-    if (stat != null) {
-      aggregateStat.subtract(stat);
-      stat.getCapacity().subtract(size);
-      aggregateStat.add(stat);
-      nodeMetricMap.put(datanodeDetails.getUuid(), stat);
-    }
-  }
-
-  @Override
-  public void onMessage(CommandForDatanode commandForDatanode,
-                        EventPublisher publisher) {
-    addDatanodeCommand(commandForDatanode.getDatanodeId(),
-        commandForDatanode.getCommand());
-  }
-
-  /**
-   * Remove the node stats and update the storage stats
-   * in this Node Manager.
-   *
-   * @param dnUuid UUID of the datanode.
-   */
-  @Override
-  public void processDeadNode(UUID dnUuid) {
-    SCMNodeStat stat = this.nodeMetricMap.get(dnUuid);
-    if (stat != null) {
-      aggregateStat.subtract(stat);
-      stat.set(0, 0, 0);
-    }
-  }
-
-  /**
-   * A helper class holding per-node capacity, usage, and state values
-   * for the simulated datanodes.
-   */
-  private static class NodeData {
-    public static final long HEALTHY = 1;
-    public static final long STALE = 2;
-    public static final long DEAD = 3;
-
-    private long capacity;
-    private long used;
-
-    private long currentState;
-
-    /**
-     * By default nodes are healthy.
-     * @param capacity - node capacity in bytes
-     * @param used - bytes already used
-     */
-    NodeData(long capacity, long used) {
-      this(capacity, used, HEALTHY);
-    }
-
-    /**
-     * Constructs a nodeDefinition.
-     *
-     * @param capacity capacity.
-     * @param used used.
-     * @param currentState - HEALTHY, STALE or DEAD.
-     */
-    NodeData(long capacity, long used, long currentState) {
-      this.capacity = capacity;
-      this.used = used;
-      this.currentState = currentState;
-    }
-
-    public long getCapacity() {
-      return capacity;
-    }
-
-    public void setCapacity(long capacity) {
-      this.capacity = capacity;
-    }
-
-    public long getUsed() {
-      return used;
-    }
-
-    public void setUsed(long used) {
-      this.used = used;
-    }
-
-    public long getCurrentState() {
-      return currentState;
-    }
-
-    public void setCurrentState(long currentState) {
-      this.currentState = currentState;
-    }
-
-  }
-}
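
For reference, the per-datanode command bookkeeping in the deleted mock above
(addDatanodeCommand / getCommandCount / clearCommandQueue) follows a common
map-of-queues pattern. A minimal sketch, assuming a plain String stands in
for SCMCommand so the example stays self-contained:

import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;

final class CommandQueueSketch {
  // One FIFO queue of pending commands per datanode UUID.
  private final Map<UUID, List<String>> commandMap = new HashMap<>();

  // computeIfAbsent collapses the containsKey/put branch used in the
  // mock into a single call with the same behavior.
  void addDatanodeCommand(UUID dnId, String command) {
    commandMap.computeIfAbsent(dnId, k -> new LinkedList<>()).add(command);
  }

  int getCommandCount(UUID dnId) {
    List<String> list = commandMap.get(dnId);
    return (list == null) ? 0 : list.size();
  }

  void clearCommandQueue(UUID dnId) {
    // Reset the queue only if the datanode is already known, matching
    // the containsKey guard in the mock.
    commandMap.computeIfPresent(dnId, (k, v) -> new LinkedList<>());
  }
}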

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
deleted file mode 100644
index 38050c9..0000000
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .ContainerWithPipeline;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.SCMTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATED;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
-import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
-
-/**
- * Tests the closeContainerEventHandler class.
- */
-public class TestCloseContainerEventHandler {
-
-  private static Configuration configuration;
-  private static MockNodeManager nodeManager;
-  private static ContainerMapping mapping;
-  private static long size;
-  private static File testDir;
-  private static EventQueue eventQueue;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    configuration = SCMTestUtils.getConf();
-    size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
-        OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
-    testDir = GenericTestUtils
-        .getTestDir(TestCloseContainerEventHandler.class.getSimpleName());
-    configuration
-        .set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
-    nodeManager = new MockNodeManager(true, 10);
-    mapping = new ContainerMapping(configuration, nodeManager, 128,
-        new EventQueue());
-    eventQueue = new EventQueue();
-    eventQueue.addHandler(CLOSE_CONTAINER,
-        new CloseContainerEventHandler(mapping));
-    eventQueue.addHandler(DATANODE_COMMAND, nodeManager);
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if (mapping != null) {
-      mapping.close();
-    }
-    FileUtil.fullyDelete(testDir);
-  }
-
-  @Test
-  public void testIfCloseContainerEventHandlerInvoked() {
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerEventHandler.LOG);
-    eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(Math.abs(RandomUtils.nextInt())));
-    eventQueue.processAll(1000);
-    Assert.assertTrue(logCapturer.getOutput()
-        .contains("Close container Event triggered for container"));
-  }
-
-  @Test
-  public void testCloseContainerEventWithInvalidContainer() {
-    long id = Math.abs(RandomUtils.nextInt());
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerEventHandler.LOG);
-    eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(id));
-    eventQueue.processAll(1000);
-    Assert.assertTrue(logCapturer.getOutput()
-        .contains("Failed to update the container state"));
-  }
-
-  @Test
-  public void testCloseContainerEventWithValidContainers() throws IOException {
-
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerWithPipeline containerWithPipeline = mapping
-        .allocateContainer(HddsProtos.ReplicationType.STAND_ALONE,
-            HddsProtos.ReplicationFactor.ONE, "ozone");
-    ContainerID id = new ContainerID(
-        containerWithPipeline.getContainerInfo().getContainerID());
-    DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
-    int closeCount = nodeManager.getCommandCount(datanode);
-    eventQueue.fireEvent(CLOSE_CONTAINER, id);
-    eventQueue.processAll(1000);
-    // At this point the allocated container is not yet in OPEN state,
-    // so firing a close container event should not queue a CLOSE
-    // command on the datanode.
-    Assert.assertEquals(0, nodeManager.getCommandCount(datanode));
-    //Execute these state transitions so that we can close the container.
-    mapping.updateContainerState(id.getId(), CREATED);
-    eventQueue.fireEvent(CLOSE_CONTAINER,
-        new ContainerID(
-            containerWithPipeline.getContainerInfo().getContainerID()));
-    eventQueue.processAll(1000);
-    Assert.assertEquals(closeCount + 1,
-        nodeManager.getCommandCount(datanode));
-    Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
-        mapping.getStateManager().getContainer(id).getState());
-  }
-
-  @Test
-  public void testCloseContainerEventWithRatis() throws IOException {
-
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerWithPipeline containerWithPipeline = mapping
-        .allocateContainer(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE, "ozone");
-    ContainerID id = new ContainerID(
-        containerWithPipeline.getContainerInfo().getContainerID());
-    int[] closeCount = new int[3];
-    eventQueue.fireEvent(CLOSE_CONTAINER, id);
-    eventQueue.processAll(1000);
-    int i = 0;
-    for (DatanodeDetails details : containerWithPipeline.getPipeline()
-        .getMachines()) {
-      closeCount[i] = nodeManager.getCommandCount(details);
-      i++;
-    }
-    i = 0;
-    for (DatanodeDetails details : containerWithPipeline.getPipeline()
-        .getMachines()) {
-      Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details));
-      i++;
-    }
-    //Execute these state transitions so that we can close the container.
-    mapping.updateContainerState(id.getId(), CREATED);
-    eventQueue.fireEvent(CLOSE_CONTAINER, id);
-    eventQueue.processAll(1000);
-    i = 0;
-    // Make sure close is queued for each datanode on the pipeline
-    for (DatanodeDetails details : containerWithPipeline.getPipeline()
-        .getMachines()) {
-      Assert.assertEquals(closeCount[i] + 1,
-          nodeManager.getCommandCount(details));
-      Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
-          mapping.getStateManager().getContainer(id).getState());
-      i++;
-    }
-  }
-}
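
The test above leans on the EventQueue fire-then-drain contract: fireEvent
only enqueues, and nothing reaches a handler until processAll runs. A toy
analogue (not the hdds EventQueue API) showing why every fireEvent in the
test is followed by processAll(1000) before any assertion:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.function.Consumer;

final class TinyEventQueue<T> {
  private final List<Consumer<T>> handlers = new ArrayList<>();
  private final Deque<T> pending = new ArrayDeque<>();

  void addHandler(Consumer<T> handler) {
    handlers.add(handler);
  }

  // Only queues the event; handlers run later, in processAll.
  void fireEvent(T event) {
    pending.add(event);
  }

  // Drains the queue synchronously so assertions made afterwards
  // observe every handler side effect.
  void processAll() {
    T event;
    while ((event = pending.poll()) != null) {
      for (Consumer<T> handler : handlers) {
        handler.accept(event);
      }
    }
  }
}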

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
deleted file mode 100644
index 0997e1f..0000000
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerActionsFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-/**
- * Tests ContainerActionsHandler.
- */
-public class TestContainerActionsHandler {
-
-  @Test
-  public void testCloseContainerAction() {
-    EventQueue queue = new EventQueue();
-    ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
-    CloseContainerEventHandler closeContainerEventHandler = Mockito.mock(
-        CloseContainerEventHandler.class);
-    queue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerEventHandler);
-    queue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
-
-    ContainerAction action = ContainerAction.newBuilder()
-        .setContainerID(1L)
-        .setAction(ContainerAction.Action.CLOSE)
-        .setReason(ContainerAction.Reason.CONTAINER_FULL)
-        .build();
-
-    ContainerActionsProto cap = ContainerActionsProto.newBuilder()
-        .addContainerActions(action)
-        .build();
-
-    ContainerActionsFromDatanode containerActions =
-        new ContainerActionsFromDatanode(
-            TestUtils.randomDatanodeDetails(), cap);
-
-    queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions);
-
-    verify(closeContainerEventHandler, times(1))
-        .onMessage(ContainerID.valueof(1L), queue);
-
-  }
-
-}
\ No newline at end of file
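
The verification step above is plain Mockito: the handler is a mock, the
event queue delivers to it, and verify(..., times(1)) asserts exactly one
onMessage call. A minimal sketch of that pattern, with an illustrative
Consumer standing in for the Ozone handler types:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.function.Consumer;
import org.junit.Test;

public class MockVerifySketchTest {
  @Test
  @SuppressWarnings("unchecked")
  public void handlerInvokedExactlyOnce() {
    Consumer<Long> handler = mock(Consumer.class);

    // Code under test would deliver the event here.
    handler.accept(1L);

    // Fails the test unless accept(1L) was called exactly once.
    verify(handler, times(1)).accept(1L);
  }
}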

