http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
deleted file mode 100644
index 3fc0dd0..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ /dev/null
@@ -1,424 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.states.DatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .InitDatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .RunningDatanodeState;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.file.Paths;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Tests the datanode state machine class and its states.
- */
-public class TestDatanodeStateMachine {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestDatanodeStateMachine.class);
-  // Changing it to 1, as the current code checks for multiple SCM
-  // directories and fails if they already exist.
-  private final int scmServerCount = 1;
-  private List<String> serverAddresses;
-  private List<RPC.Server> scmServers;
-  private List<ScmTestMock> mockServers;
-  private ExecutorService executorService;
-  private Configuration conf;
-  private File testRoot;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = SCMTestUtils.getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
-        TimeUnit.MILLISECONDS);
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
-    serverAddresses = new LinkedList<>();
-    scmServers = new LinkedList<>();
-    mockServers = new LinkedList<>();
-    for (int x = 0; x < scmServerCount; x++) {
-      int port = SCMTestUtils.getReuseableAddress().getPort();
-      String address = "127.0.0.1";
-      serverAddresses.add(address + ":" + port);
-      ScmTestMock mock = new ScmTestMock();
-      scmServers.add(SCMTestUtils.startScmRpcServer(conf, mock,
-          new InetSocketAddress(address, port), 10));
-      mockServers.add(mock);
-    }
-
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES,
-        serverAddresses.toArray(new String[0]));
-
-    String path = GenericTestUtils
-        .getTempPath(TestDatanodeStateMachine.class.getSimpleName());
-    testRoot = new File(path);
-    if (!testRoot.mkdirs()) {
-      LOG.info("Required directories {} already exist.", testRoot);
-    }
-
-    File dataDir = new File(testRoot, "data");
-    conf.set(HDDS_DATANODE_DIR_KEY, dataDir.getAbsolutePath());
-    if (!dataDir.mkdirs()) {
-      LOG.info("Data dir create failed.");
-    }
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
-        new File(testRoot, "scm").getAbsolutePath());
-    path = Paths.get(path.toString(),
-        TestDatanodeStateMachine.class.getSimpleName() + ".id").toString();
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID, path);
-    executorService = HadoopExecutors.newCachedThreadPool(
-        new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("Test Data Node State Machine Thread - %d").build());
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    try {
-      if (executorService != null) {
-        executorService.shutdown();
-        try {
-          if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-            executorService.shutdownNow();
-          }
-
-          if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-            LOG.error("Unable to shutdown properly.");
-          }
-        } catch (InterruptedException e) {
-          LOG.error("Error attempting to shutdown.", e);
-          executorService.shutdownNow();
-        }
-      }
-      for (RPC.Server s : scmServers) {
-        s.stop();
-      }
-    } catch (Exception e) {
-      // ignore all exceptions from the shutdown
-    } finally {
-      FileUtil.fullyDelete(testRoot);
-    }
-  }
-
-  /**
-   * Assert that starting the state machine executes the Init State.
-   *
-   * @throws InterruptedException
-   */
-  @Test
-  public void testStartStopDatanodeStateMachine() throws IOException,
-      InterruptedException, TimeoutException {
-    try (DatanodeStateMachine stateMachine =
-        new DatanodeStateMachine(getNewDatanodeDetails(), conf)) {
-      stateMachine.startDaemon();
-      SCMConnectionManager connectionManager =
-          stateMachine.getConnectionManager();
-      GenericTestUtils.waitFor(() -> connectionManager.getValues().size() == 1,
-          1000, 30000);
-
-      stateMachine.stopDaemon();
-      assertTrue(stateMachine.isDaemonStopped());
-    }
-  }
-
-  /**
-   * This test explores the state machine by invoking each call in sequence,
-   * just as if the state machine itself were calling it. Because this is a
-   * test, we are able to verify each of the assumptions.
-   * <p>
-   * Here is what happens at High level.
-   * <p>
-   * 1. We start the datanodeStateMachine in the INIT State.
-   * <p>
-   * 2. We invoke the INIT state task.
-   * <p>
-   * 3. That creates a set of RPC endpoints that are ready to connect to SCMs.
-   * <p>
-   * 4. We assert that we have moved to the running state for the
-   * DatanodeStateMachine.
-   * <p>
-   * 5. We get the task for the Running State. Executing that task makes the
-   * first network call of the state machine. The Endpoint is in the
-   * GETVERSION state and we invoke the task.
-   * <p>
-   * 6. We assert that this call was a success by checking that each of the
-   * endpoints now has the version response it got from the SCM server it was
-   * talking to, and that each of the mock servers serviced one RPC call.
-   * <p>
-   * 7. Since the Register is now done, subsequent calls to get the task will
-   * return HeartbeatTask, which sends heartbeats to SCM. We assert that we
-   * get the right task from the sub-system below.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testDatanodeStateContext() throws IOException,
-      InterruptedException, ExecutionException, TimeoutException {
-    // There is no mini cluster started in this test;
-    // create an ID file so that the state machine can load a fake datanode ID.
-    File idPath = new File(
-        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID));
-    idPath.delete();
-    DatanodeDetails datanodeDetails = getNewDatanodeDetails();
-    DatanodeDetails.Port port = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    datanodeDetails.setPort(port);
-    ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
-
-    try (DatanodeStateMachine stateMachine =
-             new DatanodeStateMachine(datanodeDetails, conf)) {
-      DatanodeStateMachine.DatanodeStates currentState =
-          stateMachine.getContext().getState();
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
-          currentState);
-
-      DatanodeState<DatanodeStateMachine.DatanodeStates> task =
-          stateMachine.getContext().getTask();
-      Assert.assertEquals(InitDatanodeState.class, task.getClass());
-
-      task.execute(executorService);
-      DatanodeStateMachine.DatanodeStates newState =
-          task.await(2, TimeUnit.SECONDS);
-
-      for (EndpointStateMachine endpoint :
-          stateMachine.getConnectionManager().getValues()) {
-        // We assert that each of them is in the GETVERSION state.
-        Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION,
-            endpoint.getState());
-      }
-
-      // The Datanode has moved into Running State, since endpoints are
-      // created.
-      // We move to running state when we are ready to issue RPC calls to SCMs.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          newState);
-
-      // If we had called context.execute instead of calling into each state
-      // this would have happened automatically.
-      stateMachine.getContext().setState(newState);
-      task = stateMachine.getContext().getTask();
-      Assert.assertEquals(RunningDatanodeState.class, task.getClass());
-
-      // This execute will invoke getVersion calls against all SCM endpoints
-      // that we know of.
-
-      task.execute(executorService);
-      newState = task.await(10, TimeUnit.SECONDS);
-      // Since we are in the running state, we should remain in RUNNING.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          newState);
-
-      for (EndpointStateMachine endpoint :
-          stateMachine.getConnectionManager().getValues()) {
-
-        // Since the earlier task.execute called into GetVersion, the
-        // endpoint state machine should move to the REGISTER state.
-        Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
-            endpoint.getState());
-
-        // We assert that each of the endpoints has gotten a version from the
-        // SCM server.
-        Assert.assertNotNull(endpoint.getVersion());
-      }
-
-      // We can also assert that all mock servers have received only one RPC
-      // call at this point in time.
-      for (ScmTestMock mock : mockServers) {
-        Assert.assertEquals(1, mock.getRpcCount());
-      }
-
-      // This task is the Running task, but the running task executes tasks
-      // based on the state of the endpoints; hence this next call will be a
-      // Register at the endpoint RPC level.
-      task = stateMachine.getContext().getTask();
-      task.execute(executorService);
-      newState = task.await(2, TimeUnit.SECONDS);
-
-      // Since we are in the running state, we should remain in RUNNING.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          newState);
-
-      for (ScmTestMock mock : mockServers) {
-        Assert.assertEquals(2, mock.getRpcCount());
-      }
-
-      // This task is the Running task, but the running task executes tasks
-      // based on the state of the endpoints; hence this next call will be a
-      // HeartbeatTask at the endpoint RPC level.
-      task = stateMachine.getContext().getTask();
-      task.execute(executorService);
-      newState = task.await(2, TimeUnit.SECONDS);
-
-      // Since we are in the running state, we should remain in RUNNING.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
-          newState);
-
-
-      for (ScmTestMock mock : mockServers) {
-        Assert.assertEquals(1, mock.getHeartbeatCount());
-      }
-    }
-  }
-
-  @Test
-  public void testDatanodeStateMachineWithIdWriteFail() throws Exception {
-
-    File idPath = new File(
-        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID));
-    idPath.delete();
-    DatanodeDetails datanodeDetails = getNewDatanodeDetails();
-    DatanodeDetails.Port port = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    datanodeDetails.setPort(port);
-
-    try (DatanodeStateMachine stateMachine =
-             new DatanodeStateMachine(datanodeDetails, conf)) {
-      DatanodeStateMachine.DatanodeStates currentState =
-          stateMachine.getContext().getState();
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
-          currentState);
-
-      DatanodeState<DatanodeStateMachine.DatanodeStates> task =
-          stateMachine.getContext().getTask();
-      Assert.assertEquals(InitDatanodeState.class, task.getClass());
-
-      // Set the idPath to read-only; the state machine will fail to write the
-      // datanodeId file and will set the state to shutdown.
-      idPath.getParentFile().mkdirs();
-      idPath.getParentFile().setReadOnly();
-
-      task.execute(executorService);
-      DatanodeStateMachine.DatanodeStates newState =
-          task.await(2, TimeUnit.SECONDS);
-
-      // As we have changed the permission of idPath to read-only, writing
-      // will fail and the state will be set to shutdown.
-      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
-          newState);
-
-      //Setting back to writable.
-      idPath.getParentFile().setWritable(true);
-    }
-  }
-
-  /**
-   * Test state transition with a list of invalid scm configurations,
-   * and verify the state transitions to SHUTDOWN each time.
-   */
-  @Test
-  public void testDatanodeStateMachineWithInvalidConfiguration()
-      throws Exception {
-    LinkedList<Map.Entry<String, String>> confList =
-        new LinkedList<Map.Entry<String, String>>();
-    confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, ""));
-
-    // Invalid ozone.scm.names
-    /** Empty **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_NAMES, ""));
-    /** Invalid schema **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_NAMES, "x..y"));
-    /** Invalid port **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz"));
-    /** Port out of range **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_NAMES, "scm:123456"));
-    // Invalid ozone.scm.datanode.id
-    /** Empty **/
-    confList.add(Maps.immutableEntry(
-        ScmConfigKeys.OZONE_SCM_DATANODE_ID, ""));
-
-    confList.forEach((entry) -> {
-      Configuration perTestConf = new Configuration(conf);
-      perTestConf.setStrings(entry.getKey(), entry.getValue());
-      LOG.info("Test with {} = {}", entry.getKey(), entry.getValue());
-      try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
-          getNewDatanodeDetails(), perTestConf)) {
-        DatanodeStateMachine.DatanodeStates currentState =
-            stateMachine.getContext().getState();
-        Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
-            currentState);
-        DatanodeState<DatanodeStateMachine.DatanodeStates> task =
-            stateMachine.getContext().getTask();
-        task.execute(executorService);
-        DatanodeStateMachine.DatanodeStates newState =
-            task.await(2, TimeUnit.SECONDS);
-        Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
-            newState);
-      } catch (Exception e) {
-        Assert.fail("Unexpected exception found");
-      }
-    });
-  }
-
-  private DatanodeDetails getNewDatanodeDetails() {
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    return DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setHostName("localhost")
-        .setIpAddress("127.0.0.1")
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort)
-        .build();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
deleted file mode 100644
index f991520..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.junit.Test;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-/**
- * This class is used to test the KeyValueContainerData.
- */
-public class TestKeyValueContainerData {
-
-  private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
-  @Test
-  public void testKeyValueData() {
-    long containerId = 1L;
-    ContainerProtos.ContainerType containerType = ContainerProtos
-        .ContainerType.KeyValueContainer;
-    String path = "/tmp";
-    String containerDBType = "RocksDB";
-    ContainerProtos.ContainerLifeCycleState state = ContainerProtos
-        .ContainerLifeCycleState.CLOSED;
-    AtomicLong val = new AtomicLong(0);
-
-    KeyValueContainerData kvData = new KeyValueContainerData(containerId,
-        MAXSIZE);
-
-    assertEquals(containerType, kvData.getContainerType());
-    assertEquals(containerId, kvData.getContainerID());
-    assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData
-        .getState());
-    assertEquals(0, kvData.getMetadata().size());
-    assertEquals(0, kvData.getNumPendingDeletionBlocks());
-    assertEquals(val.get(), kvData.getReadBytes());
-    assertEquals(val.get(), kvData.getWriteBytes());
-    assertEquals(val.get(), kvData.getReadCount());
-    assertEquals(val.get(), kvData.getWriteCount());
-    assertEquals(val.get(), kvData.getKeyCount());
-    assertEquals(val.get(), kvData.getNumPendingDeletionBlocks());
-    assertEquals(MAXSIZE, kvData.getMaxSize());
-
-    kvData.setState(state);
-    kvData.setContainerDBType(containerDBType);
-    kvData.setChunksPath(path);
-    kvData.setMetadataPath(path);
-    kvData.incrReadBytes(10);
-    kvData.incrWriteBytes(10);
-    kvData.incrReadCount();
-    kvData.incrWriteCount();
-    kvData.incrKeyCount();
-    kvData.incrPendingDeletionBlocks(1);
-
-    assertEquals(state, kvData.getState());
-    assertEquals(containerDBType, kvData.getContainerDBType());
-    assertEquals(path, kvData.getChunksPath());
-    assertEquals(path, kvData.getMetadataPath());
-
-    assertEquals(10, kvData.getReadBytes());
-    assertEquals(10, kvData.getWriteBytes());
-    assertEquals(1, kvData.getReadCount());
-    assertEquals(1, kvData.getWriteCount());
-    assertEquals(1, kvData.getKeyCount());
-    assertEquals(1, kvData.getNumPendingDeletionBlocks());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
deleted file mode 100644
index 5889222..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
-import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-import static org.junit.Assert.*;
-
-/**
- * This class tests {@link DatanodeVersionFile}.
- */
-public class TestDatanodeVersionFile {
-
-  private File versionFile;
-  private DatanodeVersionFile dnVersionFile;
-  private Properties properties;
-
-  private String storageID;
-  private String clusterID;
-  private String datanodeUUID;
-  private long cTime;
-  private int lv;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
-  public void setup() throws IOException {
-    versionFile = folder.newFile("Version");
-    storageID = UUID.randomUUID().toString();
-    clusterID = UUID.randomUUID().toString();
-    datanodeUUID = UUID.randomUUID().toString();
-    cTime = Time.now();
-    lv = DataNodeLayoutVersion.getLatestVersion().getVersion();
-
-    dnVersionFile = new DatanodeVersionFile(
-        storageID, clusterID, datanodeUUID, cTime, lv);
-
-    dnVersionFile.createVersionFile(versionFile);
-
-    properties = dnVersionFile.readFrom(versionFile);
-  }
-
-  @Test
-  public void testCreateAndReadVersionFile() throws IOException{
-
-    //Check VersionFile exists
-    assertTrue(versionFile.exists());
-
-    assertEquals(storageID, HddsVolumeUtil.getStorageID(
-        properties, versionFile));
-    assertEquals(clusterID, HddsVolumeUtil.getClusterID(
-        properties, versionFile, clusterID));
-    assertEquals(datanodeUUID, HddsVolumeUtil.getDatanodeUUID(
-        properties, versionFile, datanodeUUID));
-    assertEquals(cTime, HddsVolumeUtil.getCreationTime(
-        properties, versionFile));
-    assertEquals(lv, HddsVolumeUtil.getLayOutVersion(
-        properties, versionFile));
-  }
-
-  @Test
-  public void testIncorrectClusterId() throws IOException{
-    try {
-      String randomClusterID = UUID.randomUUID().toString();
-      HddsVolumeUtil.getClusterID(properties, versionFile,
-          randomClusterID);
-      fail("Test failure in testIncorrectClusterId");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Mismatched ClusterIDs", ex);
-    }
-  }
-
-  @Test
-  public void testVerifyCTime() throws IOException{
-    long invalidCTime = -10;
-    dnVersionFile = new DatanodeVersionFile(
-        storageID, clusterID, datanodeUUID, invalidCTime, lv);
-    dnVersionFile.createVersionFile(versionFile);
-    properties = dnVersionFile.readFrom(versionFile);
-
-    try {
-      HddsVolumeUtil.getCreationTime(properties, versionFile);
-      fail("Test failure in testVerifyCTime");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Invalid Creation time in " +
-          "Version File : " + versionFile, ex);
-    }
-  }
-
-  @Test
-  public void testVerifyLayOut() throws IOException{
-    int invalidLayOutVersion = 100;
-    dnVersionFile = new DatanodeVersionFile(
-        storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion);
-    dnVersionFile.createVersionFile(versionFile);
-    Properties props = dnVersionFile.readFrom(versionFile);
-
-    try {
-      HddsVolumeUtil.getLayOutVersion(props, versionFile);
-      fail("Test failure in testVerifyLayOut");
-    } catch (InconsistentStorageStateException ex) {
-      GenericTestUtils.assertExceptionContains("Invalid layOutVersion.", ex);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
deleted file mode 100644
index c7b9e0a..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.fs.FileSystemTestHelper;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests create/read .container files.
- */
-public class TestContainerDataYaml {
-
-  private static long testContainerID = 1234;
-
-  private static String testRoot = new FileSystemTestHelper().getTestRootDir();
-
-  private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
-
-  /**
-   * Creates a .container file. cleanup() should be called at the end of any
-   * test that creates a container file.
-   */
-  private File createContainerFile(long containerID) throws IOException {
-    new File(testRoot).mkdirs();
-
-    String containerPath = containerID + ".container";
-
-    KeyValueContainerData keyValueContainerData = new KeyValueContainerData(
-        containerID, MAXSIZE);
-    keyValueContainerData.setContainerDBType("RocksDB");
-    keyValueContainerData.setMetadataPath(testRoot);
-    keyValueContainerData.setChunksPath(testRoot);
-
-    File containerFile = new File(testRoot, containerPath);
-
-    // Create .container file with ContainerData
-    ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
-        .KeyValueContainer, keyValueContainerData, containerFile);
-
-    // Check whether the .container file exists.
-    assertTrue(containerFile.exists());
-
-    return containerFile;
-  }
-
-  private void cleanup() {
-    FileUtil.fullyDelete(new File(testRoot));
-  }
-
-  @Test
-  public void testCreateContainerFile() throws IOException {
-    long containerID = testContainerID++;
-
-    File containerFile = createContainerFile(containerID);
-
-    // Read from .container file, and verify data.
-    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    assertEquals(containerID, kvData.getContainerID());
-    assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
-        .getContainerType());
-    assertEquals("RocksDB", kvData.getContainerDBType());
-    assertEquals(containerFile.getParent(), kvData.getMetadataPath());
-    assertEquals(containerFile.getParent(), kvData.getChunksPath());
-    assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData
-        .getState());
-    assertEquals(1, kvData.getLayOutVersion());
-    assertEquals(0, kvData.getMetadata().size());
-    assertEquals(MAXSIZE, kvData.getMaxSize());
-
-    // Update ContainerData.
-    kvData.addMetadata("VOLUME", "hdfs");
-    kvData.addMetadata("OWNER", "ozone");
-    kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
-
-
-    ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
-            .KeyValueContainer, kvData, containerFile);
-
-    // Reading newly updated data from .container file
-    kvData =  (KeyValueContainerData) ContainerDataYaml.readContainerFile(
-        containerFile);
-
-    // verify data.
-    assertEquals(containerID, kvData.getContainerID());
-    assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
-        .getContainerType());
-    assertEquals("RocksDB", kvData.getContainerDBType());
-    assertEquals(containerFile.getParent(), kvData.getMetadataPath());
-    assertEquals(containerFile.getParent(), kvData.getChunksPath());
-    assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData
-        .getState());
-    assertEquals(1, kvData.getLayOutVersion());
-    assertEquals(2, kvData.getMetadata().size());
-    assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
-    assertEquals("ozone", kvData.getMetadata().get("OWNER"));
-    assertEquals(MAXSIZE, kvData.getMaxSize());
-  }
-
-  @Test
-  public void testIncorrectContainerFile() throws IOException{
-    try {
-      String containerFile = "incorrect.container";
-      //Get file from resources folder
-      ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(containerFile).getFile());
-      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-          .readContainerFile(file);
-      fail("testIncorrectContainerFile failed");
-    } catch (IllegalStateException ex) {
-      GenericTestUtils.assertExceptionContains("Unexpected " +
-          "ContainerLifeCycleState", ex);
-    }
-  }
-
-
-  @Test
-  public void testCheckBackWardCompatabilityOfContainerFile() throws
-      IOException {
-    // This test covers the rollback case: after an upgrade, .container files
-    // written by the new server may contain additional fields. If we later
-    // decide to roll back, an older Ozone version must still be able to read
-    // those .container files.
-
-    try {
-      String containerFile = "additionalfields.container";
-      //Get file from resources folder
-      ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(containerFile).getFile());
-      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-          .readContainerFile(file);
-      ContainerUtils.verifyChecksum(kvData);
-
-      // Check that the container file data is consistent.
-      assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData
-          .getState());
-      assertEquals("RocksDB", kvData.getContainerDBType());
-      assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
-          .getContainerType());
-      assertEquals(9223372036854775807L, kvData.getContainerID());
-      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
-          .getChunksPath());
-      assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData
-          .getMetadataPath());
-      assertEquals(1, kvData.getLayOutVersion());
-      assertEquals(2, kvData.getMetadata().size());
-
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      fail("testCheckBackWardCompatabilityOfContainerFile failed");
-    }
-  }
-
-  /**
-   * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData)}.
-   */
-  @Test
-  public void testChecksumInContainerFile() throws IOException {
-    long containerID = testContainerID++;
-
-    File containerFile = createContainerFile(containerID);
-
-    // Read from .container file, and verify data.
-    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-        .readContainerFile(containerFile);
-    ContainerUtils.verifyChecksum(kvData);
-
-    cleanup();
-  }
-
-  /**
-   * Test to verify incorrect checksum is detected.
-   */
-  @Test
-  public void testIncorrectChecksum() {
-    try {
-      String containerFile = "incorrect.checksum.container";
-      //Get file from resources folder
-      ClassLoader classLoader = getClass().getClassLoader();
-      File file = new File(classLoader.getResource(containerFile).getFile());
-      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
-          .readContainerFile(file);
-      ContainerUtils.verifyChecksum(kvData);
-      fail("testIncorrectChecksum failed");
-    } catch (Exception ex) {
-      GenericTestUtils.assertExceptionContains("Container checksum error for " 
+
-          "ContainerID:", ex);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
deleted file mode 100644
index af322ea..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Class used to test ContainerSet operations.
- */
-public class TestContainerSet {
-
-  @Test
-  public void testAddGetRemoveContainer() throws StorageContainerException {
-    ContainerSet containerSet = new ContainerSet();
-    long containerId = 100L;
-    ContainerProtos.ContainerLifeCycleState state = ContainerProtos
-        .ContainerLifeCycleState.CLOSED;
-
-    KeyValueContainerData kvData = new KeyValueContainerData(containerId,
-        (long) StorageUnit.GB.toBytes(5));
-    kvData.setState(state);
-    KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new
-        OzoneConfiguration());
-
-    //addContainer
-    boolean result = containerSet.addContainer(keyValueContainer);
-    assertTrue(result);
-    try {
-      result = containerSet.addContainer(keyValueContainer);
-      fail("Adding same container ID twice should fail.");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Container already exists with" 
+
-          " container Id " + containerId, ex);
-    }
-
-    //getContainer
-    KeyValueContainer container = (KeyValueContainer) containerSet
-        .getContainer(containerId);
-    KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
-        container.getContainerData();
-    assertEquals(containerId, keyValueContainerData.getContainerID());
-    assertEquals(state, keyValueContainerData.getState());
-    assertNull(containerSet.getContainer(1000L));
-
-    //removeContainer
-    assertTrue(containerSet.removeContainer(containerId));
-    assertFalse(containerSet.removeContainer(1000L));
-  }
-
-  @Test
-  public void testIteratorsAndCount() throws StorageContainerException {
-
-    ContainerSet containerSet = createContainerSet();
-
-    assertEquals(10, containerSet.containerCount());
-
-    // Using containerIterator.
-    Iterator<Container> containerIterator = 
containerSet.getContainerIterator();
-
-    int count = 0;
-    while(containerIterator.hasNext()) {
-      Container kv = containerIterator.next();
-      ContainerData containerData = kv.getContainerData();
-      long containerId = containerData.getContainerID();
-      if (containerId%2 == 0) {
-        assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
-            containerData.getState());
-      } else {
-        assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN,
-            containerData.getState());
-      }
-      count++;
-    }
-    assertEquals(10, count);
-
-    //Using containerMapIterator.
-    Iterator<Map.Entry<Long, Container>> containerMapIterator = containerSet
-        .getContainerMapIterator();
-
-    count = 0;
-    while (containerMapIterator.hasNext()) {
-      Container kv = containerMapIterator.next().getValue();
-      ContainerData containerData = kv.getContainerData();
-      long containerId = containerData.getContainerID();
-      if (containerId%2 == 0) {
-        assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
-            containerData.getState());
-      } else {
-        assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN,
-            containerData.getState());
-      }
-      count++;
-    }
-    assertEquals(10, count);
-
-  }
-
-
-  @Test
-  public void testGetContainerReport() throws IOException {
-
-    ContainerSet containerSet = createContainerSet();
-
-    ContainerReportsProto containerReportsRequestProto = containerSet
-        .getContainerReport();
-
-    assertEquals(10, containerReportsRequestProto.getReportsList().size());
-  }
-
-
-
-  @Test
-  public void testListContainer() throws StorageContainerException {
-    ContainerSet containerSet = createContainerSet();
-
-    List<ContainerData> result = new ArrayList<>();
-    containerSet.listContainer(2, 5, result);
-
-    assertEquals(5, result.size());
-
-    for(ContainerData containerData : result) {
-      assertTrue(containerData.getContainerID() >=2 && containerData
-          .getContainerID()<=6);
-    }
-  }
-
-  private ContainerSet createContainerSet() throws StorageContainerException {
-    ContainerSet containerSet = new ContainerSet();
-    for (int i=0; i<10; i++) {
-      KeyValueContainerData kvData = new KeyValueContainerData(i,
-          (long) StorageUnit.GB.toBytes(5));
-      if (i%2 == 0) {
-        kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
-      } else {
-        kvData.setState(ContainerProtos.ContainerLifeCycleState.OPEN);
-      }
-      KeyValueContainer kv = new KeyValueContainer(kvData, new
-          OzoneConfiguration());
-      containerSet.addContainer(kv);
-    }
-    return containerSet;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
deleted file mode 100644
index fc622b2..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.impl;
-
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto
-    .ContainerProtos.ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.shaded.com.google.protobuf.ByteString;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-/**
- * Test-cases to verify the functionality of HddsDispatcher.
- */
-public class TestHddsDispatcher {
-
-  @Test
-  public void testContainerCloseActionWhenFull() throws IOException {
-    String testDir = GenericTestUtils.getTempPath(
-        TestHddsDispatcher.class.getSimpleName());
-    try {
-      UUID scmId = UUID.randomUUID();
-      OzoneConfiguration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      DatanodeDetails dd = randomDatanodeDetails();
-      ContainerSet containerSet = new ContainerSet();
-      VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
-      StateContext context = Mockito.mock(StateContext.class);
-      KeyValueContainerData containerData = new KeyValueContainerData(1L,
-          (long) StorageUnit.GB.toBytes(1));
-      Container container = new KeyValueContainer(containerData, conf);
-      container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
-          scmId.toString());
-      containerSet.addContainer(container);
-      HddsDispatcher hddsDispatcher = new HddsDispatcher(
-          conf, containerSet, volumeSet, context);
-      hddsDispatcher.setScmId(scmId.toString());
-      ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(
-          getWriteChunkRequest(dd.getUuidString(), 1L, 1L));
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          responseOne.getResult());
-      verify(context, times(0))
-          .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
-      containerData.setBytesUsed(Double.valueOf(
-          StorageUnit.MB.toBytes(950)).longValue());
-      ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(
-          getWriteChunkRequest(dd.getUuidString(), 1L, 2L));
-      Assert.assertEquals(ContainerProtos.Result.SUCCESS,
-          responseTwo.getResult());
-      verify(context, times(1))
-          .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
-
-    } finally {
-      FileUtils.deleteDirectory(new File(testDir));
-    }
-
-  }
-
-  // This method has to be removed once we move scm/TestUtils.java
-  // from server-scm project to container-service or to common project.
-  private static DatanodeDetails randomDatanodeDetails() {
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(UUID.randomUUID().toString())
-        .setHostName("localhost")
-        .setIpAddress("127.0.0.1")
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
-  }
-
-  private ContainerCommandRequestProto getWriteChunkRequest(
-      String datanodeId, Long containerId, Long localId) {
-
-    ByteString data = ByteString.copyFrom(
-        UUID.randomUUID().toString().getBytes());
-    ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo
-        .newBuilder()
-        .setChunkName(
-            DigestUtils.md5Hex("dummy-key") + "_stream_"
-                + containerId + "_chunk_" + localId)
-        .setOffset(0)
-        .setLen(data.size())
-        .build();
-
-    WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
-        .newBuilder()
-        .setBlockID(new BlockID(containerId, localId)
-            .getDatanodeBlockIDProtobuf())
-        .setChunkData(chunk)
-        .setData(data);
-
-    return ContainerCommandRequestProto
-        .newBuilder()
-        .setContainerID(containerId)
-        .setCmdType(ContainerProtos.Type.WriteChunk)
-        .setTraceID(UUID.randomUUID().toString())
-        .setDatanodeUuid(datanodeId)
-        .setWriteChunk(writeChunkRequest)
-        .build();
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
deleted file mode 100644
index 07c78c0..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Datanode container related test-cases.
- */
-package org.apache.hadoop.ozone.container.common.impl;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
deleted file mode 100644
index b658295..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.interfaces;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-
-/**
- * Tests Handler interface.
- */
-public class TestHandler {
-  @Rule
-  public TestRule timeout = new Timeout(300000);
-
-  private Configuration conf;
-  private HddsDispatcher dispatcher;
-  private ContainerSet containerSet;
-  private VolumeSet volumeSet;
-  private Handler handler;
-
-  @Before
-  public void setup() throws Exception {
-    this.conf = new Configuration();
-    this.containerSet = Mockito.mock(ContainerSet.class);
-    this.volumeSet = Mockito.mock(VolumeSet.class);
-
-    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, null);
-  }
-
-  @Test
-  public void testGetKeyValueHandler() throws Exception {
-    Handler kvHandler = dispatcher.getHandler(
-        ContainerProtos.ContainerType.KeyValueContainer);
-
-    Assert.assertTrue("getHandlerForContainerType returned incorrect handler",
-        (kvHandler instanceof KeyValueHandler));
-  }
-
-  @Test
-  public void testGetHandlerForInvalidContainerType() {
-    // When new ContainerProtos.ContainerType values are added, increment the
-    // code used for the invalid enum.
-    ContainerProtos.ContainerType invalidContainerType =
-        ContainerProtos.ContainerType.forNumber(2);
-
-    Assert.assertEquals("New ContainerType detected. Not an invalid " +
-        "containerType", invalidContainerType, null);
-
-    Handler handler = dispatcher.getHandler(invalidContainerType);
-    Assert.assertEquals("Get Handler for Invalid ContainerType should " +
-        "return null.", handler, null);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
deleted file mode 100644
index ca3d29d..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * SCM Testing and Mocking Utils.
- */
-package org.apache.hadoop.ozone.container.common;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
deleted file mode 100644
index aae388d..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.util.concurrent.ScheduledExecutorService;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-/**
- * Test cases to test {@link ReportManager}.
- */
-public class TestReportManager {
-
-  @Test
-  public void testReportManagerInit() {
-    Configuration conf = new OzoneConfiguration();
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ReportPublisher dummyPublisher = Mockito.mock(ReportPublisher.class);
-    ReportManager.Builder builder = ReportManager.newBuilder(conf);
-    builder.setStateContext(dummyContext);
-    builder.addPublisher(dummyPublisher);
-    ReportManager reportManager = builder.build();
-    reportManager.init();
-    verify(dummyPublisher, times(1)).init(eq(dummyContext),
-        any(ScheduledExecutorService.class));
-
-  }
-}
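
TestReportManager verified only the wiring: build() collects a StateContext and a set of publishers, and init() hands each publisher the context plus a shared scheduler. A rough sketch of that builder-then-init shape, assuming init() does nothing more than fan those two objects out (all names here are simplified stand-ins, not the real ReportManager):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public final class ReportManagerSketch {

  interface Publisher {
    void init(Object context, ScheduledExecutorService executor);
  }

  private final Object context;
  private final List<Publisher> publishers;
  private final ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor();

  private ReportManagerSketch(Object context, List<Publisher> publishers) {
    this.context = context;
    this.publishers = publishers;
  }

  // Fans the shared context and executor out to every registered publisher;
  // this single interaction is what the deleted test verified with Mockito.
  void init() {
    for (Publisher p : publishers) {
      p.init(context, executor);
    }
  }

  static Builder newBuilder() {
    return new Builder();
  }

  static final class Builder {
    private Object context;
    private final List<Publisher> publishers = new ArrayList<>();

    Builder setStateContext(Object ctx) { this.context = ctx; return this; }
    Builder addPublisher(Publisher p) { publishers.add(p); return this; }
    ReportManagerSketch build() {
      return new ReportManagerSketch(context, publishers);
    }
  }
}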

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
deleted file mode 100644
index 1e82326..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.GeneratedMessage;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsIdFactory;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.util.Random;
-import java.util.UUID;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-/**
- * Test cases to test {@link ReportPublisher}.
- */
-public class TestReportPublisher {
-
-  private static Configuration config;
-
-  @BeforeClass
-  public static void setup() {
-    config = new OzoneConfiguration();
-  }
-
-  /**
-   * Dummy report publisher for testing.
-   */
-  private class DummyReportPublisher extends ReportPublisher {
-
-    private final long frequency;
-    private int getReportCount = 0;
-
-    DummyReportPublisher(long frequency) {
-      this.frequency = frequency;
-    }
-
-    @Override
-    protected long getReportFrequency() {
-      return frequency;
-    }
-
-    @Override
-    protected GeneratedMessage getReport() {
-      getReportCount++;
-      return null;
-    }
-  }
-
-  @Test
-  public void testReportPublisherInit() {
-    ReportPublisher publisher = new DummyReportPublisher(0);
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ScheduledExecutorService dummyExecutorService = Mockito.mock(
-        ScheduledExecutorService.class);
-    publisher.init(dummyContext, dummyExecutorService);
-    verify(dummyExecutorService, times(1)).schedule(publisher,
-        0, TimeUnit.MILLISECONDS);
-  }
-
-  @Test
-  public void testScheduledReport() throws InterruptedException {
-    ReportPublisher publisher = new DummyReportPublisher(100);
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ScheduledExecutorService executorService = HadoopExecutors
-        .newScheduledThreadPool(1,
-            new ThreadFactoryBuilder().setDaemon(true)
-                .setNameFormat("Unit test ReportManager Thread - %d").build());
-    publisher.init(dummyContext, executorService);
-    Thread.sleep(150);
-    Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount);
-    Thread.sleep(100);
-    Assert.assertEquals(2, ((DummyReportPublisher) publisher).getReportCount);
-    executorService.shutdown();
-  }
-
-  @Test
-  public void testPublishReport() throws InterruptedException {
-    ReportPublisher publisher = new DummyReportPublisher(100);
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ScheduledExecutorService executorService = HadoopExecutors
-        .newScheduledThreadPool(1,
-            new ThreadFactoryBuilder().setDaemon(true)
-                .setNameFormat("Unit test ReportManager Thread - %d").build());
-    publisher.init(dummyContext, executorService);
-    Thread.sleep(150);
-    executorService.shutdown();
-    Assert.assertEquals(1, ((DummyReportPublisher) publisher).getReportCount);
-    verify(dummyContext, times(1)).addReport(null);
-
-  }
-
-  @Test
-  public void testCommandStatusPublisher() throws InterruptedException {
-    StateContext dummyContext = Mockito.mock(StateContext.class);
-    ReportPublisher publisher = new CommandStatusReportPublisher();
-    final Map<Long, CommandStatus> cmdStatusMap = new ConcurrentHashMap<>();
-    when(dummyContext.getCommandStatusMap()).thenReturn(cmdStatusMap);
-    publisher.setConf(config);
-
-    ScheduledExecutorService executorService = HadoopExecutors
-        .newScheduledThreadPool(1,
-            new ThreadFactoryBuilder().setDaemon(true)
-                .setNameFormat("Unit test ReportManager Thread - %d").build());
-    publisher.init(dummyContext, executorService);
-    Assert.assertNull(((CommandStatusReportPublisher) publisher).getReport());
-
-    // Insert status objects into the state context map, then get the report.
-    CommandStatus obj1 = CommandStatus.CommandStatusBuilder.newBuilder()
-        .setCmdId(HddsIdFactory.getLongId())
-        .setType(Type.deleteBlocksCommand)
-        .setStatus(Status.PENDING)
-        .build();
-    CommandStatus obj2 = CommandStatus.CommandStatusBuilder.newBuilder()
-        .setCmdId(HddsIdFactory.getLongId())
-        .setType(Type.closeContainerCommand)
-        .setStatus(Status.EXECUTED)
-        .build();
-    cmdStatusMap.put(obj1.getCmdId(), obj1);
-    cmdStatusMap.put(obj2.getCmdId(), obj2);
-    Assert.assertEquals("Should publish report with 2 status objects", 2,
-        ((CommandStatusReportPublisher) publisher).getReport()
-            .getCmdStatusCount());
-    Assert.assertEquals(
-        "Next report should have 1 status object, as command status "
-            + "objects in PENDING state are retained",
-        1, ((CommandStatusReportPublisher) publisher).getReport()
-            .getCmdStatusCount());
-    Assert.assertEquals(
-        "Status object retained in the next report should still be PENDING",
-        Status.PENDING,
-        ((CommandStatusReportPublisher) publisher).getReport()
-            .getCmdStatusList().get(0).getStatus());
-    executorService.shutdown();
-  }
-
-  /**
-   * Get a datanode details.
-   *
-   * @return DatanodeDetails
-   */
-  private static DatanodeDetails getDatanodeDetails() {
-    String uuid = UUID.randomUUID().toString();
-    Random random = new Random();
-    String ipAddress =
-        random.nextInt(256) + "." + random.nextInt(256) + "." + random
-            .nextInt(256) + "." + random.nextInt(256);
-
-    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.STANDALONE, 0);
-    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.RATIS, 0);
-    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-        DatanodeDetails.Port.Name.REST, 0);
-    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
-    builder.setUuid(uuid)
-        .setHostName("localhost")
-        .setIpAddress(ipAddress)
-        .addPort(containerPort)
-        .addPort(ratisPort)
-        .addPort(restPort);
-    return builder.build();
-  }
-
-}
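
Taken together, the assertions above describe a publisher that schedules its first run one reporting period after init() (testReportPublisherInit verifies schedule(publisher, 0, MILLISECONDS) for a zero frequency) and then re-schedules itself after each run (one report by ~150 ms and two by ~250 ms at a 100 ms frequency). A self-contained sketch of that self-rescheduling pattern, not the actual ReportPublisher internals:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public abstract class SelfSchedulingPublisher implements Runnable {

  private ScheduledExecutorService executor;

  void init(ScheduledExecutorService executor) {
    this.executor = executor;
    // First run fires one reporting period from now.
    executor.schedule(this, getReportFrequency(), TimeUnit.MILLISECONDS);
  }

  @Override
  public void run() {
    publishReport();
    // Re-schedule the next run unless the executor has been shut down.
    if (!executor.isShutdown()) {
      executor.schedule(this, getReportFrequency(), TimeUnit.MILLISECONDS);
    }
  }

  protected abstract long getReportFrequency();

  protected abstract void publishReport();

  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool =
        Executors.newSingleThreadScheduledExecutor();
    SelfSchedulingPublisher publisher = new SelfSchedulingPublisher() {
      private int count;
      @Override protected long getReportFrequency() { return 100; }
      @Override protected void publishReport() {
        System.out.println("report #" + ++count);
      }
    };
    publisher.init(pool);
    Thread.sleep(250); // expect two reports, at roughly t=100 and t=200 ms
    pool.shutdown();
  }
}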

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
deleted file mode 100644
index f8c5fe5..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.report;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-/**
- * Test cases to test ReportPublisherFactory.
- */
-public class TestReportPublisherFactory {
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Test
-  public void testGetContainerReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
-    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    ReportPublisher publisher = factory
-        .getPublisherFor(ContainerReportsProto.class);
-    Assert.assertEquals(ContainerReportPublisher.class, publisher.getClass());
-    Assert.assertEquals(conf, publisher.getConf());
-  }
-
-  @Test
-  public void testGetNodeReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
-    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    ReportPublisher publisher = factory
-        .getPublisherFor(NodeReportProto.class);
-    Assert.assertEquals(NodeReportPublisher.class, publisher.getClass());
-    Assert.assertEquals(conf, publisher.getConf());
-  }
-
-  @Test
-  public void testInvalidReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
-    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
-    exception.expect(RuntimeException.class);
-    exception.expectMessage("No publisher found for report");
-    factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class);
-  }
-}
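
The factory tests above boil down to a registry lookup: a report proto class maps to a publisher class, and an unregistered class triggers a RuntimeException carrying the "No publisher found for report" message. A sketch of that registry shape, with ordinary JDK classes standing in for the proto types (illustrative only, not the real ReportPublisherFactory):

import java.util.HashMap;
import java.util.Map;

public final class PublisherFactorySketch {

  interface Publisher { }
  static final class NodePublisher implements Publisher { }
  static final class ContainerPublisher implements Publisher { }

  // Report type -> publisher class; the keys here are arbitrary JDK classes
  // standing in for NodeReportProto / ContainerReportsProto.
  private final Map<Class<?>, Class<? extends Publisher>> registry =
      new HashMap<>();

  PublisherFactorySketch() {
    registry.put(String.class, NodePublisher.class);
    registry.put(Integer.class, ContainerPublisher.class);
  }

  Publisher getPublisherFor(Class<?> reportType) {
    Class<? extends Publisher> cls = registry.get(reportType);
    if (cls == null) {
      // Fail fast for unregistered report types, as the deleted
      // testInvalidReportPublisher expected.
      throw new RuntimeException("No publisher found for report " + reportType);
    }
    try {
      return cls.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(e);
    }
  }

  public static void main(String[] args) {
    PublisherFactorySketch factory = new PublisherFactorySketch();
    System.out.println(
        factory.getPublisherFor(String.class).getClass().getSimpleName());
    try {
      factory.getPublisherFor(Double.class);
    } catch (RuntimeException e) {
      System.out.println(e.getMessage());
    }
  }
}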

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
deleted file mode 100644
index 37615bc..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.report;
-/**
- * This package has test cases for all the report publishers which generate
- * reports that are sent to SCM via heartbeat.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
deleted file mode 100644
index 05ac76d..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/**
- * Tests for command handlers.
- */
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
deleted file mode 100644
index 606940b..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.states.endpoint;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine.DatanodeStates;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
-
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-
-import java.util.UUID;
-
-/**
- * This class tests the functionality of HeartbeatEndpointTask.
- */
-public class TestHeartbeatEndpointTask {
-
-  @Test
-  public void testHeartbeatWithoutReports() throws Exception {
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(scm);
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithNodeReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(NodeReportProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertTrue(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithContainerReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(ContainerReportsProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertTrue(heartbeat.hasContainerReport());
-    Assert.assertEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithCommandStatusReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(CommandStatusReportsProto.getDefaultInstance());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertNotEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertFalse(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithContainerActions() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addContainerAction(getContainerAction());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertFalse(heartbeat.hasNodeReport());
-    Assert.assertFalse(heartbeat.hasContainerReport());
-    Assert.assertEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertTrue(heartbeat.hasContainerActions());
-  }
-
-  @Test
-  public void testHeartbeatWithAllReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-
-    StorageContainerDatanodeProtocolClientSideTranslatorPB scm =
-        Mockito.mock(
-            StorageContainerDatanodeProtocolClientSideTranslatorPB.class);
-    ArgumentCaptor<SCMHeartbeatRequestProto> argument = ArgumentCaptor
-        .forClass(SCMHeartbeatRequestProto.class);
-    Mockito.when(scm.sendHeartbeat(argument.capture()))
-        .thenAnswer(invocation ->
-            SCMHeartbeatResponseProto.newBuilder()
-                .setDatanodeUUID(
-                    ((SCMHeartbeatRequestProto)invocation.getArgument(0))
-                        .getDatanodeDetails().getUuid())
-                .build());
-
-    HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
-        conf, context, scm);
-    context.addReport(NodeReportProto.getDefaultInstance());
-    context.addReport(ContainerReportsProto.getDefaultInstance());
-    context.addReport(CommandStatusReportsProto.getDefaultInstance());
-    context.addContainerAction(getContainerAction());
-    endpointTask.call();
-    SCMHeartbeatRequestProto heartbeat = argument.getValue();
-    Assert.assertTrue(heartbeat.hasDatanodeDetails());
-    Assert.assertTrue(heartbeat.hasNodeReport());
-    Assert.assertTrue(heartbeat.hasContainerReport());
-    Assert.assertNotEquals(0, heartbeat.getCommandStatusReportsCount());
-    Assert.assertTrue(heartbeat.hasContainerActions());
-  }
-
-  /**
-   * Creates HeartbeatEndpointTask for the given StorageContainerManager proxy.
-   *
-   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
-   *
-   * @return HeartbeatEndpointTask
-   */
-  private HeartbeatEndpointTask getHeartbeatEndpointTask(
-      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
-    Configuration conf = new OzoneConfiguration();
-    StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
-        Mockito.mock(DatanodeStateMachine.class));
-    return getHeartbeatEndpointTask(conf, context, proxy);
-
-  }
-
-  /**
-   * Creates HeartbeatEndpointTask with the given conf, context and
-   * StorageContainerManager client side proxy.
-   *
-   * @param conf Configuration
-   * @param context StateContext
-   * @param proxy StorageContainerDatanodeProtocolClientSideTranslatorPB
-   *
-   * @return HeartbeatEndpointTask
-   */
-  private HeartbeatEndpointTask getHeartbeatEndpointTask(
-      Configuration conf,
-      StateContext context,
-      StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
-    DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
-        .setUuid(UUID.randomUUID().toString())
-        .setHostName("localhost")
-        .setIpAddress("127.0.0.1")
-        .build();
-    EndpointStateMachine endpointStateMachine = Mockito
-        .mock(EndpointStateMachine.class);
-    Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy);
-    return HeartbeatEndpointTask.newBuilder()
-        .setConfig(conf)
-        .setDatanodeDetails(datanodeDetails)
-        .setContext(context)
-        .setEndpointStateMachine(endpointStateMachine)
-        .build();
-  }
-
-  private ContainerAction getContainerAction() {
-    ContainerAction.Builder builder = ContainerAction.newBuilder();
-    builder.setContainerID(1L)
-        .setAction(ContainerAction.Action.CLOSE)
-        .setReason(ContainerAction.Reason.CONTAINER_FULL);
-    return builder.build();
-  }
-}
\ No newline at end of file
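
Each test above repeats one Mockito idiom: an ArgumentCaptor grabs the outgoing heartbeat request while thenAnswer builds a response that echoes the datanode UUID back. A stripped-down sketch of that capture-and-echo idiom, assuming Mockito 2.x or later on the classpath; the Scm interface below is a stand-in, not the real client-side translator:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.mockito.ArgumentCaptor;

public final class CaptorSketch {

  // Minimal stand-in for the SCM-side protocol interface.
  interface Scm {
    String sendHeartbeat(String request);
  }

  public static void main(String[] args) {
    Scm scm = mock(Scm.class);

    // Capture whatever request the caller sends and echo part of it back,
    // the same shape as the sendHeartbeat stubbing in the deleted tests.
    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    when(scm.sendHeartbeat(captor.capture()))
        .thenAnswer(invocation -> "ack:" + invocation.getArgument(0));

    String response = scm.sendHeartbeat("dn-uuid-123");

    System.out.println(response);          // ack:dn-uuid-123
    System.out.println(captor.getValue()); // dn-uuid-123
  }
}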

