http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
new file mode 100644
index 0000000..41a8a80
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -0,0 +1,274 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.VersionInfo;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos
+    .ContainerBlocksDeletionACKResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ReportState;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
+import org.apache.hadoop.ozone.protocol.VersionResponse;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * SCM RPC mock class.
+ */
+public class ScmTestMock implements StorageContainerDatanodeProtocol {
+  private int rpcResponseDelay;
+  private AtomicInteger heartbeatCount = new AtomicInteger(0);
+  private AtomicInteger rpcCount = new AtomicInteger(0);
+  private ReportState reportState;
+  private AtomicInteger containerReportsCount = new AtomicInteger(0);
+
+  // Map of datanode to containers
+  private Map<DatanodeDetails, Map<String, ContainerInfo>> nodeContainers =
+      new HashMap();
+  /**
+   * Returns the number of heartbeats made to this class.
+   *
+   * @return int
+   */
+  public int getHeartbeatCount() {
+    return heartbeatCount.get();
+  }
+
+  /**
+   * Returns the number of RPC calls made to this mock class instance.
+   *
+   * @return - Number of RPC calls serviced by this class.
+   */
+  public int getRpcCount() {
+    return rpcCount.get();
+  }
+
+  /**
+   * Gets the RPC response delay.
+   *
+   * @return delay in milliseconds.
+   */
+  public int getRpcResponseDelay() {
+    return rpcResponseDelay;
+  }
+
+  /**
+   * Sets the RPC response delay.
+   *
+   * @param rpcResponseDelay - delay in milliseconds.
+   */
+  public void setRpcResponseDelay(int rpcResponseDelay) {
+    this.rpcResponseDelay = rpcResponseDelay;
+  }
+
+  /**
+   * Returns the number of container reports server has seen.
+   * @return int
+   */
+  public int getContainerReportsCount() {
+    return containerReportsCount.get();
+  }
+
+  /**
+   * Returns the number of containers that have been reported so far.
+   * @return - count of reported containers.
+   */
+  public long getContainerCount() {
+    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
+      return containerMap.size();
+    }).sum();
+  }
+
+  /**
+   * Get the number of keys reported from container reports.
+   * @return - number of keys reported.
+   */
+  public long getKeyCount() {
+    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
+      return containerMap.values().parallelStream().mapToLong((container) -> {
+        return container.getKeyCount();
+      }).sum();
+    }).sum();
+  }
+
+  /**
+   * Get the number of bytes used from container reports.
+   * @return - number of bytes used.
+   */
+  public long getBytesUsed() {
+    return nodeContainers.values().parallelStream().mapToLong((containerMap)->{
+      return containerMap.values().parallelStream().mapToLong((container) -> {
+        return container.getUsed();
+      }).sum();
+    }).sum();
+  }
+
+  /**
+   * Returns SCM version.
+   *
+   * @return Version info.
+   */
+  @Override
+  public StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto
+      getVersion(StorageContainerDatanodeProtocolProtos
+      .SCMVersionRequestProto unused) throws IOException {
+    rpcCount.incrementAndGet();
+    sleepIfNeeded();
+    VersionInfo versionInfo = VersionInfo.getLatestVersion();
+    return VersionResponse.newBuilder()
+        .setVersion(versionInfo.getVersion())
+        .addValue(VersionInfo.DESCRIPTION_KEY, versionInfo.getDescription())
+        .build().getProtobufMessage();
+  }
+
+  private void sleepIfNeeded() {
+    if (getRpcResponseDelay() > 0) {
+      try {
+        Thread.sleep(getRpcResponseDelay());
+      } catch (InterruptedException ex) {
+        // Just ignore this exception.
+      }
+    }
+  }
+
+  /**
+   * Used by data node to send a Heartbeat.
+   *
+   * @param datanodeDetailsProto - DatanodeDetailsProto.
+   * @param nodeReport - node report.
+   * @return - SCMHeartbeatResponseProto
+   * @throws IOException
+   */
+  @Override
+  public StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto
+      sendHeartbeat(DatanodeDetailsProto datanodeDetailsProto,
+                    SCMNodeReport nodeReport, ReportState scmReportState)
+      throws IOException {
+    rpcCount.incrementAndGet();
+    heartbeatCount.incrementAndGet();
+    this.reportState = scmReportState;
+    sleepIfNeeded();
+    List<SCMCommandResponseProto>
+        cmdResponses = new LinkedList<>();
+    return SCMHeartbeatResponseProto.newBuilder().addAllCommands(cmdResponses)
+        .build();
+  }
+
+  /**
+   * Register Datanode.
+   *
+   * @param datanodeDetailsProto DatanodeDetailsProto.
+   * @param scmAddresses - List of SCMs this datanode is configured to
+   * communicate.
+   * @return SCM Command.
+   */
+  @Override
+  public StorageContainerDatanodeProtocolProtos
+      .SCMRegisteredCmdResponseProto register(
+          DatanodeDetailsProto datanodeDetailsProto, String[] scmAddresses)
+      throws IOException {
+    rpcCount.incrementAndGet();
+    sleepIfNeeded();
+    return StorageContainerDatanodeProtocolProtos
+        .SCMRegisteredCmdResponseProto
+        .newBuilder().setClusterID(UUID.randomUUID().toString())
+        .setDatanodeUUID(datanodeDetailsProto.getUuid()).setErrorCode(
+            StorageContainerDatanodeProtocolProtos
+                .SCMRegisteredCmdResponseProto.ErrorCode.success).build();
+  }
+
+  /**
+   * Send a container report.
+   *
+   * @param reports -- Container report
+   * @return HeartbeatResponse.nullcommand.
+   * @throws IOException
+   */
+  @Override
+  public StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto
+      sendContainerReport(StorageContainerDatanodeProtocolProtos
+      .ContainerReportsRequestProto reports) throws IOException {
+    Preconditions.checkNotNull(reports);
+    containerReportsCount.incrementAndGet();
+
+    DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
+        reports.getDatanodeDetails());
+    if (reports.getReportsCount() > 0) {
+      Map containers = nodeContainers.get(datanode);
+      if (containers == null) {
+        containers = new LinkedHashMap();
+        nodeContainers.put(datanode, containers);
+      }
+
+      for (StorageContainerDatanodeProtocolProtos.ContainerInfo report:
+          reports.getReportsList()) {
+        containers.put(report.getContainerName(), report);
+      }
+    }
+
+    return StorageContainerDatanodeProtocolProtos
+        .ContainerReportsResponseProto.newBuilder().build();
+  }
+
+  @Override
+  public ContainerBlocksDeletionACKResponseProto 
sendContainerBlocksDeletionACK(
+      ContainerBlocksDeletionACKProto request) throws IOException {
+    return ContainerBlocksDeletionACKResponseProto
+        .newBuilder().getDefaultInstanceForType();
+  }
+
+  public ReportState getReportState() {
+    return this.reportState;
+  }
+
+  /**
+   * Reset the mock Scm for a test to get a fresh start without rebuilding MockScm.
+   */
+  public void reset() {
+    heartbeatCount.set(0);
+    rpcCount.set(0);
+    reportState = ReportState.newBuilder()
+        .setState(ReportState.states.noContainerReports)
+        .setCount(0).build();
+    containerReportsCount.set(0);
+    nodeContainers.clear();
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
new file mode 100644
index 0000000..9446ce2
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -0,0 +1,381 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.common;
+
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .EndpointStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine
+    .SCMConnectionManager;
+import org.apache.hadoop.ozone.container.common.states.DatanodeState;
+import org.apache.hadoop.ozone.container.common.states.datanode
+    .InitDatanodeState;
+import org.apache.hadoop.ozone.container.common.states.datanode
+    .RunningDatanodeState;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Paths;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests the datanode state machine class and its states.
+ */
+public class TestDatanodeStateMachine {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestDatanodeStateMachine.class);
+  private final int scmServerCount = 3;
+  private List<String> serverAddresses;
+  private List<RPC.Server> scmServers;
+  private List<ScmTestMock> mockServers;
+  private ExecutorService executorService;
+  private Configuration conf;
+  private File testRoot;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = SCMTestUtils.getConf();
+    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500,
+        TimeUnit.MILLISECONDS);
+    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
+    serverAddresses = new LinkedList<>();
+    scmServers = new LinkedList<>();
+    mockServers = new LinkedList<>();
+    for (int x = 0; x < scmServerCount; x++) {
+      int port = SCMTestUtils.getReuseableAddress().getPort();
+      String address = "127.0.0.1";
+      serverAddresses.add(address + ":" + port);
+      ScmTestMock mock = new ScmTestMock();
+
+      scmServers.add(SCMTestUtils.startScmRpcServer(conf, mock,
+          new InetSocketAddress(address, port), 10));
+      mockServers.add(mock);
+    }
+
+    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES,
+        serverAddresses.toArray(new String[0]));
+
+    String path = GenericTestUtils
+        .getTempPath(TestDatanodeStateMachine.class.getSimpleName());
+    testRoot = new File(path);
+    if (!testRoot.mkdirs()) {
+      LOG.info("Required directories {} already exist.", testRoot);
+    }
+
+    File dataDir = new File(testRoot, "data");
+    conf.set(DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
+    if (!dataDir.mkdirs()) {
+      LOG.info("Data dir create failed.");
+    }
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+        new File(testRoot, "scm").getAbsolutePath());
+    path = Paths.get(path.toString(),
+        TestDatanodeStateMachine.class.getSimpleName() + ".id").toString();
+    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ID, path);
+    executorService = HadoopExecutors.newCachedThreadPool(
+        new ThreadFactoryBuilder().setDaemon(true)
+            .setNameFormat("Test Data Node State Machine Thread - 
%d").build());
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    try {
+      if (executorService != null) {
+        executorService.shutdown();
+        try {
+          if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
+            executorService.shutdownNow();
+          }
+
+          if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
+            LOG.error("Unable to shutdown properly.");
+          }
+        } catch (InterruptedException e) {
+          LOG.error("Error attempting to shutdown.", e);
+          executorService.shutdownNow();
+        }
+      }
+      for (RPC.Server s : scmServers) {
+        s.stop();
+      }
+    } catch (Exception e) {
+      // Ignore all exceptions from the shutdown.
+    } finally {
+      testRoot.delete();
+    }
+  }
+
+  /**
+   * Assert that starting statemachine executes the Init State.
+   *
+   * @throws InterruptedException
+   */
+  @Test
+  public void testStartStopDatanodeStateMachine() throws IOException,
+      InterruptedException, TimeoutException {
+    try (DatanodeStateMachine stateMachine =
+        new DatanodeStateMachine(getNewDatanodeDetails(), conf)) {
+      stateMachine.startDaemon();
+      SCMConnectionManager connectionManager =
+          stateMachine.getConnectionManager();
+      GenericTestUtils.waitFor(() -> connectionManager.getValues().size() == 3,
+          1000, 30000);
+
+      stateMachine.stopDaemon();
+      assertTrue(stateMachine.isDaemonStopped());
+    }
+  }
+
+  /**
+   * This test explores the state machine by invoking each call in sequence 
just
+ as if the state machine would call it. Because this is a test we are
+   * able to verify each of the assumptions.
+   * <p>
+   * Here is what happens at High level.
+   * <p>
+   * 1. We start the datanodeStateMachine in the INIT State.
+   * <p>
+   * 2. We invoke the INIT state task.
+   * <p>
+   * 3. That creates a set of RPC endpoints that are ready to connect to SCMs.
+   * <p>
+   * 4. We assert that we have moved to the running state for the
+   * DatanodeStateMachine.
+   * <p>
+   * 5. We get the task for the Running State - Executing that running state,
+   * makes the first network call in of the state machine. The Endpoint is in
+   * the GETVERSION State and we invoke the task.
+   * <p>
+   * 6. We assert that this call was a success by checking that each of the
+   * endpoints now have the version response that each got from the SCM server it
+   * was talking to and also each of the mock server serviced one RPC call.
+   * <p>
+   * 7. Since the Register is done now, next calls to get task will return
+   * HeartbeatTask, which sends heartbeats to SCM. We assert that we get right
+   * task from sub-system below.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testDatanodeStateContext() throws IOException,
+      InterruptedException, ExecutionException, TimeoutException {
+    // There is no mini cluster started in this test,
+    // create a ID file so that state machine could load a fake datanode ID.
+    File idPath = new File(
+        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID));
+    idPath.delete();
+    DatanodeDetails datanodeDetails = getNewDatanodeDetails();
+    datanodeDetails.setContainerPort(
+        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+    ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
+
+    try (DatanodeStateMachine stateMachine =
+             new DatanodeStateMachine(datanodeDetails, conf)) {
+      DatanodeStateMachine.DatanodeStates currentState =
+          stateMachine.getContext().getState();
+      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
+          currentState);
+
+      DatanodeState<DatanodeStateMachine.DatanodeStates> task =
+          stateMachine.getContext().getTask();
+      Assert.assertEquals(InitDatanodeState.class, task.getClass());
+
+      task.execute(executorService);
+      DatanodeStateMachine.DatanodeStates newState =
+          task.await(2, TimeUnit.SECONDS);
+
+      for (EndpointStateMachine endpoint :
+          stateMachine.getConnectionManager().getValues()) {
+        // We assert that each of the endpoints is in State GETVERSION.
+        Assert.assertEquals(EndpointStateMachine.EndPointStates.GETVERSION,
+            endpoint.getState());
+      }
+
+      // The Datanode has moved into Running State, since endpoints are 
created.
+      // We move to running state when we are ready to issue RPC calls to SCMs.
+      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
+          newState);
+
+      // If we had called context.execute instead of calling into each state
+      // this would have happened automatically.
+      stateMachine.getContext().setState(newState);
+      task = stateMachine.getContext().getTask();
+      Assert.assertEquals(RunningDatanodeState.class, task.getClass());
+
+      // This execute will invoke getVersion calls against all SCM endpoints
+      // that we know of.
+
+      task.execute(executorService);
+      newState = task.await(10, TimeUnit.SECONDS);
+      // If we are in running state, we should be in running.
+      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
+          newState);
+
+      for (EndpointStateMachine endpoint :
+          stateMachine.getConnectionManager().getValues()) {
+
+        // Since the earlier task.execute called into GetVersion, the
+        // endPointState Machine should move to REGISTER state.
+        Assert.assertEquals(EndpointStateMachine.EndPointStates.REGISTER,
+            endpoint.getState());
+
+        // We assert that each of the end points have gotten a version from the
+        // SCM Server.
+        Assert.assertNotNull(endpoint.getVersion());
+      }
+
+      // We can also assert that all mock servers have received only one RPC
+      // call at this point of time.
+      for (ScmTestMock mock : mockServers) {
+        Assert.assertEquals(1, mock.getRpcCount());
+      }
+
+      // This task is the Running task, but running task executes tasks based
+      // on the state of Endpoints, hence this next call will be a Register at
+      // the endpoint RPC level.
+      task = stateMachine.getContext().getTask();
+      task.execute(executorService);
+      newState = task.await(2, TimeUnit.SECONDS);
+
+      // If we are in running state, we should be in running.
+      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
+          newState);
+
+      for (ScmTestMock mock : mockServers) {
+        Assert.assertEquals(2, mock.getRpcCount());
+      }
+
+      // This task is the Running task, but running task executes tasks based
+      // on the state of Endpoints, hence this next call will be a
+      // HeartbeatTask at the endpoint RPC level.
+      task = stateMachine.getContext().getTask();
+      task.execute(executorService);
+      newState = task.await(2, TimeUnit.SECONDS);
+
+      // If we are in running state, we should be in running.
+      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING,
+          newState);
+
+
+      for (ScmTestMock mock : mockServers) {
+        Assert.assertEquals(1, mock.getHeartbeatCount());
+        // Assert that the heartbeat did indeed carry the State that we said
+        // we have in the datanode.
+        Assert.assertEquals(mock.getReportState().getState().getNumber(),
+            StorageContainerDatanodeProtocolProtos.ReportState.states
+                .noContainerReports.getNumber());
+      }
+    }
+  }
+
+  /**
+   * Test state transition with a list of invalid scm configurations,
+   * and verify the state transits to SHUTDOWN each time.
+   */
+  @Test
+  public void testDatanodeStateMachineWithInvalidConfiguration()
+      throws Exception {
+    LinkedList<Map.Entry<String, String>> confList =
+        new LinkedList<Map.Entry<String, String>>();
+    confList.add(Maps.immutableEntry(ScmConfigKeys.OZONE_SCM_NAMES, ""));
+
+    // Invalid ozone.scm.names
+    /** Empty **/
+    confList.add(Maps.immutableEntry(
+        ScmConfigKeys.OZONE_SCM_NAMES, ""));
+    /** Invalid schema **/
+    confList.add(Maps.immutableEntry(
+        ScmConfigKeys.OZONE_SCM_NAMES, "x..y"));
+    /** Invalid port **/
+    confList.add(Maps.immutableEntry(
+        ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz"));
+    /** Port out of range **/
+    confList.add(Maps.immutableEntry(
+        ScmConfigKeys.OZONE_SCM_NAMES, "scm:123456"));
+    // Invalid ozone.scm.datanode.id
+    /** Empty **/
+    confList.add(Maps.immutableEntry(
+        ScmConfigKeys.OZONE_SCM_DATANODE_ID, ""));
+
+    confList.forEach((entry) -> {
+      Configuration perTestConf = new Configuration(conf);
+      perTestConf.setStrings(entry.getKey(), entry.getValue());
+      LOG.info("Test with {} = {}", entry.getKey(), entry.getValue());
+      try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
+          getNewDatanodeDetails(), perTestConf)) {
+        DatanodeStateMachine.DatanodeStates currentState =
+            stateMachine.getContext().getState();
+        Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
+            currentState);
+        DatanodeState<DatanodeStateMachine.DatanodeStates> task =
+            stateMachine.getContext().getTask();
+        task.execute(executorService);
+        DatanodeStateMachine.DatanodeStates newState =
+            task.await(2, TimeUnit.SECONDS);
+        Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
+            newState);
+      } catch (Exception e) {
+        Assert.fail("Unexpected exception found");
+      }
+    });
+  }
+
+  private DatanodeDetails getNewDatanodeDetails() {
+    return DatanodeDetails.newBuilder()
+        .setUuid(UUID.randomUUID().toString())
+        .setHostName("localhost")
+        .setIpAddress("127.0.0.1")
+        .setInfoPort(0)
+        .setInfoSecurePort(0)
+        .setContainerPort(0)
+        .setRatisPort(0)
+        .setOzoneRestPort(0)
+        .build();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
new file mode 100644
index 0000000..86888aa
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.testutils;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
+import org.apache.hadoop.ozone.container.common.statemachine.background
+    .BlockDeletingService;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A test class implementation for {@link BlockDeletingService}.
+ */
+public class BlockDeletingServiceTestImpl
+    extends BlockDeletingService {
+
+  // the service timeout
+  private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0;
+
+  // tests only
+  private CountDownLatch latch;
+  private Thread testingThread;
+  private AtomicInteger numOfProcessed = new AtomicInteger(0);
+
+  public BlockDeletingServiceTestImpl(ContainerManager containerManager,
+      int serviceInterval, Configuration conf) {
+    super(containerManager, serviceInterval,
+        SERVICE_TIMEOUT_IN_MILLISECONDS, conf);
+  }
+
+  @VisibleForTesting
+  public void runDeletingTasks() {
+    if (latch.getCount() > 0) {
+      this.latch.countDown();
+    } else {
+      throw new IllegalStateException("Count already reaches zero");
+    }
+  }
+
+  @VisibleForTesting
+  public boolean isStarted() {
+    return latch != null && testingThread.isAlive();
+  }
+
+  public int getTimesOfProcessed() {
+    return numOfProcessed.get();
+  }
+
+  // Override the implementation to start a single on-call control thread.
+  @Override public void start() {
+    PeriodicalTask svc = new PeriodicalTask();
+    // In test mode, relies on a latch countdown (via runDeletingTasks) to run tasks.
+    Runnable r = () -> {
+      while (true) {
+        latch = new CountDownLatch(1);
+        try {
+          latch.await();
+        } catch (InterruptedException e) {
+          break;
+        }
+        Future<?> future = this.getExecutorService().submit(svc);
+        try {
+          // for tests, we only wait for 3s for completion
+          future.get(3, TimeUnit.SECONDS);
+          numOfProcessed.incrementAndGet();
+        } catch (Exception e) {
+          return;
+        }
+      }
+    };
+
+    testingThread = new ThreadFactoryBuilder()
+        .setDaemon(true)
+        .build()
+        .newThread(r);
+    testingThread.start();
+  }
+
+  @Override
+  public void shutdown() {
+    testingThread.interrupt();
+    super.shutdown();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
new file mode 100644
index 0000000..4e8a90b
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+package org.apache.hadoop.ozone.container.testutils;
+// Helper classes for ozone and container tests.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/framework/README.md
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/README.md b/hadoop-hdds/framework/README.md
new file mode 100644
index 0000000..59cdac7
--- /dev/null
+++ b/hadoop-hdds/framework/README.md
@@ -0,0 +1,24 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+# Server framework for HDDS/Ozone
+
+This project contains generic utilities and resources for all the HDDS/Ozone 
+server-side components.
+
+The project is shared between the server/service projects but not with the 
+client packages.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/framework/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
new file mode 100644
index 0000000..a234b8e
--- /dev/null
+++ b/hadoop-hdds/framework/pom.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0";
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance";
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+http://maven.apache.org/xsd/maven-4.0.0.xsd";>
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-hdds</artifactId>
+    <version>3.2.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>hadoop-hdds-server-framework</artifactId>
+  <version>3.2.0-SNAPSHOT</version>
+  <description>Apache HDDS server framework</description>
+  <name>Apache HDDS Server Common</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.component>hdds</hadoop.component>
+    <is.hadoop.component>true</is.hadoop.component>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>.gitattributes</exclude>
+            <exclude>.idea/**</exclude>
+            <exclude>src/main/conf/*</exclude>
+            <exclude>src/main/webapps/static/angular-1.6.4.min.js</exclude>
+            
<exclude>src/main/webapps/static/angular-nvd3-1.0.9.min.js</exclude>
+            
<exclude>src/main/webapps/static/angular-route-1.6.4.min.js</exclude>
+            <exclude>src/main/webapps/static/d3-3.5.17.min.js</exclude>
+            <exclude>src/main/webapps/static/nvd3-1.8.5.min.css</exclude>
+            <exclude>src/main/webapps/static/nvd3-1.8.5.min.css.map</exclude>
+            <exclude>src/main/webapps/static/nvd3-1.8.5.min.js</exclude>
+            <exclude>src/main/webapps/static/nvd3-1.8.5.min.js.map</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>copy web resources</id>
+            <phase>compile</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <copy 
toDir="${project.build.directory}/classes/webapps/static">
+                  <fileset
+                          
dir="${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static">
+                  </fileset>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/BaseHttpServer.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/BaseHttpServer.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/BaseHttpServer.java
new file mode 100644
index 0000000..90de002
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/BaseHttpServer.java
@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.server;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.net.NetUtils;
+import org.eclipse.jetty.webapp.WebAppContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpServlet;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
+import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
+
+/**
+ * Base class for HTTP server of the Ozone related components.
+ */
+public abstract class BaseHttpServer {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BaseHttpServer.class);
+
+  private HttpServer2 httpServer;
+  private final Configuration conf;
+
+  private InetSocketAddress httpAddress;
+  private InetSocketAddress httpsAddress;
+
+  private HttpConfig.Policy policy;
+
+  private String name;
+
+  public BaseHttpServer(Configuration conf, String name) throws IOException {
+    this.name = name;
+    this.conf = conf;
+    if (isEnabled()) {
+      policy = DFSUtil.getHttpPolicy(conf);
+      if (policy.isHttpEnabled()) {
+        this.httpAddress = getHttpBindAddress();
+      }
+      if (policy.isHttpsEnabled()) {
+        this.httpsAddress = getHttpsBindAddress();
+      }
+      HttpServer2.Builder builder = null;
+      builder = DFSUtil.httpServerTemplateForNNAndJN(conf, this.httpAddress,
+          this.httpsAddress, name, getSpnegoPrincipal(), getKeytabFile());
+
+      final boolean xFrameEnabled = conf.getBoolean(
+          DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+          DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+      final String xFrameOptionValue = conf.getTrimmed(
+          DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+          DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+      
builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
+      httpServer = builder.build();
+
+    }
+
+  }
+
+  /**
+   * Add a servlet to BaseHttpServer.
+   * @param servletName The name of the servlet
+   * @param pathSpec The path spec for the servlet
+   * @param clazz The servlet class
+   */
+  protected void addServlet(String servletName, String pathSpec,
+                            Class<? extends HttpServlet> clazz) {
+    httpServer.addServlet(servletName, pathSpec, clazz);
+  }
+
+  /**
+   * Returns the WebAppContext associated with this HttpServer.
+   * @return WebAppContext
+   */
+  protected WebAppContext getWebAppContext() {
+    return httpServer.getWebAppContext();
+  }
+
+  protected InetSocketAddress getBindAddress(String bindHostKey,
+      String addressKey, String bindHostDefault, int bindPortdefault) {
+    final Optional<String> bindHost =
+        getHostNameFromConfigKeys(conf, bindHostKey);
+
+    final Optional<Integer> addressPort =
+        getPortNumberFromConfigKeys(conf, addressKey);
+
+    final Optional<String> addresHost =
+        getHostNameFromConfigKeys(conf, addressKey);
+
+    String hostName = bindHost.or(addresHost).or(bindHostDefault);
+
+    return NetUtils.createSocketAddr(
+        hostName + ":" + addressPort.or(bindPortdefault));
+  }
+
+  /**
+   * Retrieve the socket address that should be used by clients to connect
+   * to the  HTTPS web interface.
+   *
+   * @return Target InetSocketAddress for the Ozone HTTPS endpoint.
+   */
+  public InetSocketAddress getHttpsBindAddress() {
+    return getBindAddress(getHttpsBindHostKey(), getHttpsAddressKey(),
+        getBindHostDefault(), getHttpsBindPortDefault());
+  }
+
+  /**
+   * Retrieve the socket address that should be used by clients to connect
+   * to the  HTTP web interface.
+   * <p>
+   * * @return Target InetSocketAddress for the Ozone HTTP endpoint.
+   */
+  public InetSocketAddress getHttpBindAddress() {
+    return getBindAddress(getHttpBindHostKey(), getHttpAddressKey(),
+        getBindHostDefault(), getHttpBindPortDefault());
+
+  }
+
+  public void start() throws IOException {
+    if (httpServer != null && isEnabled()) {
+      httpServer.start();
+      updateConnectorAddress();
+    }
+
+  }
+
+  private boolean isEnabled() {
+    return conf.getBoolean(getEnabledKey(), true);
+  }
+
+  public void stop() throws Exception {
+    if (httpServer != null) {
+      httpServer.stop();
+    }
+  }
+
+  /**
+   * Update the configured listen address based on the real port
+   * <p>
+   * (eg. replace :0 with real port)
+   */
+  public void updateConnectorAddress() {
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      httpAddress = httpServer.getConnectorAddress(connIdx++);
+      String realAddress = NetUtils.getHostPortString(httpAddress);
+      conf.set(getHttpAddressKey(), realAddress);
+      LOG.info(
+          String.format("HTTP server of %s is listening at http://%s";,
+              name.toUpperCase(), realAddress));
+    }
+
+    if (policy.isHttpsEnabled()) {
+      httpsAddress = httpServer.getConnectorAddress(connIdx);
+      String realAddress = NetUtils.getHostPortString(httpsAddress);
+      conf.set(getHttpsAddressKey(), realAddress);
+      LOG.info(
+          String.format("HTTP server of %s is listening at https://%s";,
+              name.toUpperCase(), realAddress));
+    }
+  }
+
+  public InetSocketAddress getHttpAddress() {
+    return httpAddress;
+  }
+
+  public InetSocketAddress getHttpsAddress() {
+    return httpsAddress;
+  }
+
+  protected abstract String getHttpAddressKey();
+
+  protected abstract String getHttpsAddressKey();
+
+  protected abstract String getHttpBindHostKey();
+
+  protected abstract String getHttpsBindHostKey();
+
+  protected abstract String getBindHostDefault();
+
+  protected abstract int getHttpBindPortDefault();
+
+  protected abstract int getHttpsBindPortDefault();
+
+  protected abstract String getKeytabFile();
+
+  protected abstract String getSpnegoPrincipal();
+
+  protected abstract String getEnabledKey();
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServerUtils.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServerUtils.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServerUtils.java
new file mode 100644
index 0000000..2cf5b35
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServerUtils.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.server;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+
+/**
+ * Generic utilities for all HDDS/Ozone servers.
+ */
+public class ServerUtils {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ServerUtils.class);
+
+  private ServerUtils() {
+  }
+
+  /**
+   * Checks that a given value is with a range.
+   *
+   * For example, sanitizeUserArgs(17, 3, 5, 10)
+   * ensures that 17 is greater/equal than 3 * 5 and less/equal to 3 * 10.
+   *
+   * @param valueTocheck  - value to check
+   * @param baseValue     - the base value that is being used.
+   * @param minFactor     - range min - a 2 here makes us ensure that value
+   *                        valueTocheck is at least twice the baseValue.
+   * @param maxFactor     - range max
+   * @return long
+   */
+  public static long sanitizeUserArgs(long valueTocheck, long baseValue,
+      long minFactor, long maxFactor)
+      throws IllegalArgumentException {
+    if ((valueTocheck >= (baseValue * minFactor)) &&
+        (valueTocheck <= (baseValue * maxFactor))) {
+      return valueTocheck;
+    }
+    String errMsg = String.format("%d is not within min = %d or max = " +
+        "%d", valueTocheck, baseValue * minFactor, baseValue * maxFactor);
+    throw new IllegalArgumentException(errMsg);
+  }
+
+
+  /**
+   * After starting an RPC server, updates configuration with the actual
+   * listening address of that server. The listening address may be different
+   * from the configured address if, for example, the configured address uses
+   * port 0 to request use of an ephemeral port.
+   *
+   * @param conf configuration to update
+   * @param rpcAddressKey configuration key for RPC server address
+   * @param addr configured address
+   * @param rpcServer started RPC server.
+   */
+  public static InetSocketAddress updateRPCListenAddress(
+      OzoneConfiguration conf, String rpcAddressKey,
+      InetSocketAddress addr, RPC.Server rpcServer) {
+    return updateListenAddress(conf, rpcAddressKey, addr,
+        rpcServer.getListenerAddress());
+  }
+
+
+  /**
+   * After starting an server, updates configuration with the actual
+   * listening address of that server. The listening address may be different
+   * from the configured address if, for example, the configured address uses
+   * port 0 to request use of an ephemeral port.
+   *
+   * @param conf       configuration to update
+   * @param addressKey configuration key for RPC server address
+   * @param addr       configured address
+   * @param listenAddr the real listening address.
+   */
+  public static InetSocketAddress updateListenAddress(OzoneConfiguration conf,
+      String addressKey, InetSocketAddress addr, InetSocketAddress listenAddr) 
{
+    InetSocketAddress updatedAddr = new InetSocketAddress(addr.getHostString(),
+        listenAddr.getPort());
+    conf.set(addressKey,
+        addr.getHostString() + ":" + listenAddr.getPort());
+    return updatedAddr;
+  }
+
+
+  /**
+   * Releases a http connection if the request is not null.
+   * @param request
+   */
+  public static void releaseConnection(HttpRequestBase request) {
+    if (request != null) {
+      request.releaseConnection();
+    }
+  }
+
+
+  /**
+   * Checks and creates Ozone Metadir Path if it does not exist.
+   *
+   * @param conf - Configuration
+   *
+   * @return File MetaDir
+   */
+  public static File getOzoneMetaDirPath(Configuration conf) {
+    String metaDirPath = conf.getTrimmed(OzoneConfigKeys
+        .OZONE_METADATA_DIRS);
+    Preconditions.checkNotNull(metaDirPath);
+    File dirPath = new File(metaDirPath);
+    if (!dirPath.exists() && !dirPath.mkdirs()) {
+      throw new IllegalArgumentException("Unable to create paths. Path: " +
+          dirPath);
+    }
+    return dirPath;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfo.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfo.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfo.java
new file mode 100644
index 0000000..bcd75f3
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfo.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.server;
+
/**
 * Common runtime information for any service component.
 *
 * Note: it is intentional not to use MXBean or MBean as a suffix of the name.
 *
 * Most services extend the ServiceRuntimeInfoImpl class and also implement a
 * service-specific MXBean interface which extends this interface.
 *
 * Inheriting from this interface along multiple paths could confuse the JMX
 * system, and some JMX properties could disappear.
 *
 * The solution is to always extend this interface and follow the JMX naming
 * convention in the new interface.
 */
public interface ServiceRuntimeInfo {

  /**
   * Gets the version of Hadoop.
   *
   * @return the version string, including the source revision
   */
  String getVersion();

  /**
   * Gets the version of the software running on this service.
   *
   * @return a string representing the version
   */
  String getSoftwareVersion();

  /**
   * Gets the compilation information, which contains date, user and branch.
   *
   * @return the compilation information, as a JSON string.
   */
  String getCompileInfo();

  /**
   * Gets the service start time in milliseconds since the epoch.
   *
   * @return the start time in msec
   */
  long getStartedTimeInMillis();

}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfoImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfoImpl.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfoImpl.java
new file mode 100644
index 0000000..36d6b64
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfoImpl.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.server;
+
+import org.apache.hadoop.util.VersionInfo;
+
+/**
+ * Helper base class to report the standard version and runtime information.
+ *
+ */
+public class ServiceRuntimeInfoImpl implements ServiceRuntimeInfo {
+
+  private long startedTimeInMillis;
+
+  @Override
+  public String getVersion() {
+    return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
+  }
+
+  @Override
+  public String getSoftwareVersion() {
+    return VersionInfo.getVersion();
+  }
+
+  @Override
+  public String getCompileInfo() {
+    return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
+        + VersionInfo.getBranch();
+  }
+
+  @Override
+  public long getStartedTimeInMillis() {
+    return startedTimeInMillis;
+  }
+
+  public void setStartTime() {
+    startedTimeInMillis = System.currentTimeMillis();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/package-info.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/package-info.java
new file mode 100644
index 0000000..35ad5e7
--- /dev/null
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.server;
+
+/**
+ * Common server-side utilities for all the HDDS/Ozone server components.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651a05a1/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js 
b/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js
new file mode 100644
index 0000000..3b67167
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
(function () {
  "use strict";

  // Shared view model; the ozone section is only populated when the
  // corresponding JMX beans are present.
  var data = {ozone: {enabled: false}};

  dust.loadSource(dust.compile($('#tmpl-dn').html(), 'dn'));

  // The DataNodeInfo bean encodes some attributes as JSON strings;
  // decode them into real objects/arrays so the template can iterate.
  function workaround(dn) {
    function nodeMapToArray(nodes) {
      var entries = [];
      for (var key in nodes) {
        var entry = nodes[key];
        entry.name = key;
        entries.push(entry);
      }
      return entries;
    }

    dn.VolumeInfo = nodeMapToArray(JSON.parse(dn.VolumeInfo));
    dn.BPServiceActorInfo = JSON.parse(dn.BPServiceActorInfo);
    return dn;
  }

  function render() {
    var base = dust.makeBase({
      'helper_relative_time': function (chunk, ctx, bodies, params) {
        var value = dust.helpers.tap(params.value, chunk, ctx);
        var relative = moment().subtract(Number(value), 'seconds')
            .fromNow(true);
        return chunk.write(relative);
      }
    });
    dust.render('dn', base.push(data), function (err, out) {
      $('#tab-overview').html(out);
      $('#tab-overview').addClass('active');
    });
  }

  function show_err_msg() {
    $('#alert-panel-body').html("Failed to load datanode information");
    $('#alert-panel').show();
  }

  function loadDatanodeInfo() {
    $.get('/jmx?qry=Hadoop:service=DataNode,name=DataNodeInfo',
        function (resp) {
          data.dn = workaround(resp.beans[0]);
          data.dn.HostName = resp.beans[0]['DatanodeHostname'];
          render();
        }).fail(show_err_msg);
  }

  function loadOzoneScmInfo() {
    $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=SCMConnectionManager',
        function (resp) {
          if (resp.beans.length > 0) {
            data.ozone.SCMServers = resp.beans[0].SCMServers;
            data.ozone.enabled = true;
            render();
          }
        }).fail(show_err_msg);
  }

  function loadOzoneStorageInfo() {
    $.get('/jmx?qry=Hadoop:service=OzoneDataNode,name=ContainerLocationManager',
        function (resp) {
          if (resp.beans.length > 0) {
            data.ozone.LocationReport = resp.beans[0].LocationReport;
            data.ozone.enabled = true;
            render();
          }
        }).fail(show_err_msg);
  }

  loadDatanodeInfo();
  loadOzoneScmInfo();
  loadOzoneStorageInfo();

})();


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to