http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
deleted file mode 100644
index a6a967c..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ /dev/null
@@ -1,942 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license
- * agreements. See the NOTICE file distributed with this work for additional
- * information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache
- * License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a
- * copy of the License at
- *
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- *
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import com.google.protobuf.BlockingService;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.block.BlockManager;
-import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl;
-import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
-import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
-import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
-import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
-import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
-import org.apache.hadoop.hdds.scm.container.Mapping;
-import org.apache.hadoop.hdds.scm.container.replication
-    .ReplicationActivityStatus;
-import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
-import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
-import org.apache.hadoop.hdds.scm.node.DeadNodeHandler;
-import org.apache.hadoop.hdds.scm.node.NewNodeHandler;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.node.NodeReportHandler;
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.hdds.scm.node.StaleNodeHandler;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineCloseHandler;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineActionEventHandler;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineReportHandler;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.common.Storage.StorageState;
-import org.apache.hadoop.ozone.common.StorageInfo;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.util.StringUtils;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-/**
- * StorageContainerManager is the main entry point for the service that
- * provides information about
- * which SCM nodes host containers.
- *
- * <p>DataNodes report to StorageContainerManager using heartbeat messages.
- * SCM allocates containers
- * and returns a pipeline.
- *
- * <p>A client once it gets a pipeline (a list of datanodes) will connect to
- * the datanodes and
- * create a container, which then can be used to store data.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public final class StorageContainerManager extends ServiceRuntimeInfoImpl
-    implements SCMMXBean {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(StorageContainerManager.class);
-  private static final String USAGE =
-      "Usage: \n ozone scm [genericOptions] "
-          + "[ "
-          + StartupOption.INIT.getName()
-          + " [ "
-          + StartupOption.CLUSTERID.getName()
-          + " <cid> ] ]\n "
-          + "ozone scm [genericOptions] [ "
-          + StartupOption.GENCLUSTERID.getName()
-          + " ]\n "
-          + "ozone scm [ "
-          + StartupOption.HELP.getName()
-          + " ]\n";
-  /**
-   * SCM metrics.
-   */
-  private static SCMMetrics metrics;
-
-  /*
-   * RPC Endpoints exposed by SCM.
-   */
-  private final SCMDatanodeProtocolServer datanodeProtocolServer;
-  private final SCMBlockProtocolServer blockProtocolServer;
-  private final SCMClientProtocolServer clientProtocolServer;
-
-  /*
-   * State Managers of SCM.
-   */
-  private final NodeManager scmNodeManager;
-  private final Mapping scmContainerManager;
-  private final BlockManager scmBlockManager;
-  private final SCMStorage scmStorage;
-
-  private final EventQueue eventQueue;
-  /*
-   * HTTP endpoint for JMX access.
-   */
-  private final StorageContainerManagerHttpServer httpServer;
-  /**
-   * SCM super user.
-   */
-  private final String scmUsername;
-  private final Collection<String> scmAdminUsernames;
-  /**
-   * SCM mxbean.
-   */
-  private ObjectName scmInfoBeanName;
-  /**
-   * Key = DatanodeUuid, value = ContainerStat.
-   */
-  private Cache<String, ContainerStat> containerReportCache;
-
-  private final ReplicationManager replicationManager;
-
-  private final LeaseManager<Long> commandWatcherLeaseManager;
-
-  private final ReplicationActivityStatus replicationStatus;
-  private final SCMChillModeManager scmChillModeManager;
-
-  /**
-   * Creates a new StorageContainerManager. Configuration will be updated
-   * with information on the
-   * actual listening addresses used for RPC servers.
-   *
-   * @param conf configuration
-   */
-  private StorageContainerManager(OzoneConfiguration conf) throws IOException {
-
-    final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
-        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-
-    StorageContainerManager.initMetrics();
-    initContainerReportCache(conf);
-
-    scmStorage = new SCMStorage(conf);
-    if (scmStorage.getState() != StorageState.INITIALIZED) {
-      throw new SCMException("SCM not initialized.", ResultCodes
-          .SCM_NOT_INITIALIZED);
-    }
-
-    eventQueue = new EventQueue();
-
-    scmNodeManager = new SCMNodeManager(
-        conf, scmStorage.getClusterID(), this, eventQueue);
-    scmContainerManager = new ContainerMapping(
-        conf, getScmNodeManager(), cacheSize, eventQueue);
-    scmBlockManager = new BlockManagerImpl(
-        conf, getScmNodeManager(), scmContainerManager, eventQueue);
-
-    replicationStatus = new ReplicationActivityStatus();
-
-    CloseContainerEventHandler closeContainerHandler =
-        new CloseContainerEventHandler(scmContainerManager);
-    NodeReportHandler nodeReportHandler =
-        new NodeReportHandler(scmNodeManager);
-    PipelineReportHandler pipelineReportHandler =
-            new PipelineReportHandler(
-                    scmContainerManager.getPipelineSelector());
-    CommandStatusReportHandler cmdStatusReportHandler =
-        new CommandStatusReportHandler();
-
-    NewNodeHandler newNodeHandler = new NewNodeHandler(scmNodeManager);
-    StaleNodeHandler staleNodeHandler =
-        new StaleNodeHandler(scmContainerManager.getPipelineSelector());
-    DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
-        getScmContainerManager().getStateManager());
-    ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
-    PendingDeleteHandler pendingDeleteHandler =
-        new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
-
-    ContainerReportHandler containerReportHandler =
-        new ContainerReportHandler(scmContainerManager, scmNodeManager,
-            replicationStatus);
-    scmChillModeManager = new SCMChillModeManager(conf,
-        getScmContainerManager().getStateManager().getAllContainers(),
-        eventQueue);
-    PipelineActionEventHandler pipelineActionEventHandler =
-        new PipelineActionEventHandler();
-
-    PipelineCloseHandler pipelineCloseHandler =
-        new PipelineCloseHandler(scmContainerManager.getPipelineSelector());
-
-    long watcherTimeout =
-        conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
-            HDDS_SCM_WATCHER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-
-    commandWatcherLeaseManager = new LeaseManager<>("CommandWatcher",
-        watcherTimeout);
-
-    RetriableDatanodeEventWatcher retriableDatanodeEventWatcher =
-        new RetriableDatanodeEventWatcher<>(
-            SCMEvents.RETRIABLE_DATANODE_COMMAND,
-            SCMEvents.DELETE_BLOCK_STATUS,
-            commandWatcherLeaseManager);
-    retriableDatanodeEventWatcher.start(eventQueue);
-
-    //TODO: support configurable containerPlacement policy
-    ContainerPlacementPolicy containerPlacementPolicy =
-        new SCMContainerPlacementCapacity(scmNodeManager, conf);
-
-    replicationManager = new ReplicationManager(containerPlacementPolicy,
-        scmContainerManager.getStateManager(), eventQueue,
-        commandWatcherLeaseManager);
-
-    // setup CloseContainer watcher
-    CloseContainerWatcher closeContainerWatcher =
-        new CloseContainerWatcher(SCMEvents.CLOSE_CONTAINER_RETRYABLE_REQ,
-            SCMEvents.CLOSE_CONTAINER_STATUS, commandWatcherLeaseManager,
-            scmContainerManager);
-    closeContainerWatcher.start(eventQueue);
-
-    scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
-        .OZONE_ADMINISTRATORS);
-    scmUsername = UserGroupInformation.getCurrentUser().getUserName();
-    if (!scmAdminUsernames.contains(scmUsername)) {
-      scmAdminUsernames.add(scmUsername);
-    }
-
-    datanodeProtocolServer = new SCMDatanodeProtocolServer(conf, this,
-        eventQueue);
-    blockProtocolServer = new SCMBlockProtocolServer(conf, this);
-    clientProtocolServer = new SCMClientProtocolServer(conf, this);
-    httpServer = new StorageContainerManagerHttpServer(conf);
-
-    eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
-    eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager);
-    eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
-    eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportHandler);
-    eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
-    eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
-    eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
-    eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);
-    eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
-    eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
-    eventQueue.addHandler(SCMEvents.START_REPLICATION,
-        replicationStatus.getReplicationStatusListener());
-    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS,
-        replicationStatus.getChillModeStatusListener());
-    eventQueue
-        .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler);
-    eventQueue.addHandler(SCMEvents.DELETE_BLOCK_STATUS,
-        (DeletedBlockLogImpl) scmBlockManager.getDeletedBlockLog());
-    eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS,
-        pipelineActionEventHandler);
-    eventQueue.addHandler(SCMEvents.PIPELINE_CLOSE, pipelineCloseHandler);
-    eventQueue.addHandler(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
-        scmChillModeManager);
-    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS,
-        (BlockManagerImpl) scmBlockManager);
-    eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, clientProtocolServer);
-    eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler);
-
-    registerMXBean();
-  }
-
-  /**
-   * Builds a message for logging startup information about an RPC server.
-   *
-   * @param description RPC server description
-   * @param addr RPC server listening address
-   * @return server startup message
-   */
-  public static String buildRpcServerStartMessage(String description,
-      InetSocketAddress addr) {
-    return addr != null
-        ? String.format("%s is listening at %s", description, addr.toString())
-        : String.format("%s not started", description);
-  }
-
-  /**
-   * Starts an RPC server, if configured.
-   *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
-   * @param handlerCount RPC server handler count
-   * @return RPC server
-   * @throws IOException if there is an I/O error while creating RPC server
-   */
-  public static RPC.Server startRpcServer(
-      OzoneConfiguration conf,
-      InetSocketAddress addr,
-      Class<?> protocol,
-      BlockingService instance,
-      int handlerCount)
-      throws IOException {
-    RPC.Server rpcServer =
-        new RPC.Builder(conf)
-            .setProtocol(protocol)
-            .setInstance(instance)
-            .setBindAddress(addr.getHostString())
-            .setPort(addr.getPort())
-            .setNumHandlers(handlerCount)
-            .setVerbose(false)
-            .setSecretManager(null)
-            .build();
-
-    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
-    return rpcServer;
-  }
-
-  /**
-   * Main entry point for starting StorageContainerManager.
-   *
-   * @param argv arguments
-   * @throws IOException if startup fails due to I/O error
-   */
-  public static void main(String[] argv) throws IOException {
-    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-    try {
-      OzoneConfiguration conf = new OzoneConfiguration();
-      GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
-      if (!hParser.isParseSuccessful()) {
-        System.err.println("USAGE: " + USAGE + "\n");
-        hParser.printGenericCommandUsage(System.err);
-        System.exit(1);
-      }
-      StorageContainerManager scm = createSCM(
-          hParser.getRemainingArgs(), conf, true);
-      if (scm != null) {
-        scm.start();
-        scm.join();
-      }
-    } catch (Throwable t) {
-      LOG.error("Failed to start the StorageContainerManager.", t);
-      terminate(1, t);
-    }
-  }
-
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
-  }
-
-  /**
-   * Create an SCM instance based on the supplied command-line arguments.
-   *
-   * This method is intended for unit tests only. It suppresses the
-   * startup/shutdown message and skips registering Unix signal
-   * handlers.
-   *
-   * @param args command line arguments.
-   * @param conf HDDS configuration
-   * @return SCM instance
-   * @throws IOException
-   */
-  @VisibleForTesting
-  public static StorageContainerManager createSCM(
-      String[] args, OzoneConfiguration conf) throws IOException {
-    return createSCM(args, conf, false);
-  }
-
-  /**
-   * Create an SCM instance based on the supplied command-line arguments.
-   *
-   * @param args command-line arguments.
-   * @param conf HDDS configuration
-   * @param printBanner if true, then log a verbose startup message.
-   * @return SCM instance
-   * @throws IOException
-   */
-  private static StorageContainerManager createSCM(
-      String[] args,
-      OzoneConfiguration conf,
-      boolean printBanner) throws IOException {
-    String[] argv = (args == null) ? new String[0] : args;
-    if (!HddsUtils.isHddsEnabled(conf)) {
-      System.err.println(
-          "SCM cannot be started in secure mode or when " + OZONE_ENABLED + "" 
+
-              " is set to false");
-      System.exit(1);
-    }
-    StartupOption startOpt = parseArguments(argv);
-    if (startOpt == null) {
-      printUsage(System.err);
-      terminate(1);
-      return null;
-    }
-    switch (startOpt) {
-    case INIT:
-      if (printBanner) {
-        StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-            LOG);
-      }
-      terminate(scmInit(conf) ? 0 : 1);
-      return null;
-    case GENCLUSTERID:
-      if (printBanner) {
-        StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-            LOG);
-      }
-      System.out.println("Generating new cluster id:");
-      System.out.println(StorageInfo.newClusterID());
-      terminate(0);
-      return null;
-    case HELP:
-      printUsage(System.err);
-      terminate(0);
-      return null;
-    default:
-      if (printBanner) {
-        StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-            LOG);
-      }
-      return new StorageContainerManager(conf);
-    }
-  }
-
-  /**
-   * Routine to set up the Version info for StorageContainerManager.
-   *
-   * @param conf OzoneConfiguration
-   * @return true if SCM initialization is successful, false otherwise.
-   * @throws IOException if init fails due to I/O error
-   */
-  public static boolean scmInit(OzoneConfiguration conf) throws IOException {
-    SCMStorage scmStorage = new SCMStorage(conf);
-    StorageState state = scmStorage.getState();
-    if (state != StorageState.INITIALIZED) {
-      try {
-        String clusterId = StartupOption.INIT.getClusterId();
-        if (clusterId != null && !clusterId.isEmpty()) {
-          scmStorage.setClusterId(clusterId);
-        }
-        scmStorage.initialize();
-        System.out.println(
-            "SCM initialization succeeded."
-                + "Current cluster id for sd="
-                + scmStorage.getStorageDir()
-                + ";cid="
-                + scmStorage.getClusterID());
-        return true;
-      } catch (IOException ioe) {
-        LOG.error("Could not initialize SCM version file", ioe);
-        return false;
-      }
-    } else {
-      System.out.println(
-          "SCM already initialized. Reusing existing"
-              + " cluster id for sd="
-              + scmStorage.getStorageDir()
-              + ";cid="
-              + scmStorage.getClusterID());
-      return true;
-    }
-  }
-
-  private static StartupOption parseArguments(String[] args) {
-    int argsLen = (args == null) ? 0 : args.length;
-    StartupOption startOpt = StartupOption.HELP;
-    if (argsLen == 0) {
-      startOpt = StartupOption.REGULAR;
-    }
-    for (int i = 0; i < argsLen; i++) {
-      String cmd = args[i];
-      if (StartupOption.INIT.getName().equalsIgnoreCase(cmd)) {
-        startOpt = StartupOption.INIT;
-        if (argsLen > 3) {
-          return null;
-        }
-        for (i = i + 1; i < argsLen; i++) {
-          if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
-            i++;
-            if (i < argsLen && !args[i].isEmpty()) {
-              startOpt.setClusterId(args[i]);
-            } else {
-              // if no cluster id specified or is empty string, return null
-              LOG.error(
-                  "Must specify a valid cluster ID after the "
-                      + StartupOption.CLUSTERID.getName()
-                      + " flag");
-              return null;
-            }
-          } else {
-            return null;
-          }
-        }
-      } else {
-        if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
-          if (argsLen > 1) {
-            return null;
-          }
-          startOpt = StartupOption.GENCLUSTERID;
-        }
-      }
-    }
-    return startOpt;
-  }
-
-  /**
-   * Initialize SCM metrics.
-   */
-  public static void initMetrics() {
-    metrics = SCMMetrics.create();
-  }
-
-  /**
-   * Return SCM metrics instance.
-   */
-  public static SCMMetrics getMetrics() {
-    return metrics == null ? SCMMetrics.create() : metrics;
-  }
-
-  public SCMStorage getScmStorage() {
-    return scmStorage;
-  }
-
-  public SCMDatanodeProtocolServer getDatanodeProtocolServer() {
-    return datanodeProtocolServer;
-  }
-
-  public SCMBlockProtocolServer getBlockProtocolServer() {
-    return blockProtocolServer;
-  }
-
-  public SCMClientProtocolServer getClientProtocolServer() {
-    return clientProtocolServer;
-  }
-
-  /**
-   * Initialize the cache of container reports sent from datanodes.
-   *
-   * @param conf
-   */
-  private void initContainerReportCache(OzoneConfiguration conf) {
-    containerReportCache =
-        CacheBuilder.newBuilder()
-            .expireAfterAccess(Long.MAX_VALUE, TimeUnit.MILLISECONDS)
-            .maximumSize(Integer.MAX_VALUE)
-            .removalListener(
-                new RemovalListener<String, ContainerStat>() {
-                  @Override
-                  public void onRemoval(
-                      RemovalNotification<String, ContainerStat>
-                          removalNotification) {
-                    synchronized (containerReportCache) {
-                      ContainerStat stat = removalNotification.getValue();
-                      // remove invalid container report
-                      metrics.decrContainerStat(stat);
-                      LOG.debug(
-                          "Remove expired container stat entry for datanode: " 
+
-                              "{}.",
-                          removalNotification.getKey());
-                    }
-                  }
-                })
-            .build();
-  }
-
-  private void registerMXBean() {
-    Map<String, String> jmxProperties = new HashMap<>();
-    jmxProperties.put("component", "ServerRuntime");
-    this.scmInfoBeanName =
-        MBeans.register(
-            "StorageContainerManager", "StorageContainerManagerInfo",
-            jmxProperties, this);
-  }
-
-  private void unregisterMXBean() {
-    if (this.scmInfoBeanName != null) {
-      MBeans.unregister(this.scmInfoBeanName);
-      this.scmInfoBeanName = null;
-    }
-  }
-
-  @VisibleForTesting
-  public ContainerInfo getContainerInfo(long containerID) throws
-      IOException {
-    return scmContainerManager.getContainer(containerID);
-  }
-
-  /**
-   * Returns listening address of StorageLocation Protocol RPC server.
-   *
-   * @return listen address of StorageLocation RPC server
-   */
-  @VisibleForTesting
-  public InetSocketAddress getClientRpcAddress() {
-    return getClientProtocolServer().getClientRpcAddress();
-  }
-
-  @Override
-  public String getClientRpcPort() {
-    InetSocketAddress addr = getClientRpcAddress();
-    return addr == null ? "0" : Integer.toString(addr.getPort());
-  }
-
-  /**
-   * Returns listening address of StorageDatanode Protocol RPC server.
-   *
-   * @return Address where datanodes are communicating.
-   */
-  public InetSocketAddress getDatanodeRpcAddress() {
-    return getDatanodeProtocolServer().getDatanodeRpcAddress();
-  }
-
-  @Override
-  public String getDatanodeRpcPort() {
-    InetSocketAddress addr = getDatanodeRpcAddress();
-    return addr == null ? "0" : Integer.toString(addr.getPort());
-  }
-
-  /**
-   * Start service.
-   */
-  public void start() throws IOException {
-    LOG.info(
-        buildRpcServerStartMessage(
-            "StorageContainerLocationProtocol RPC server",
-            getClientRpcAddress()));
-    DefaultMetricsSystem.initialize("StorageContainerManager");
-
-    commandWatcherLeaseManager.start();
-    getClientProtocolServer().start();
-
-    LOG.info(buildRpcServerStartMessage("ScmBlockLocationProtocol RPC " +
-        "server", getBlockProtocolServer().getBlockRpcAddress()));
-    getBlockProtocolServer().start();
-
-    LOG.info(buildRpcServerStartMessage("ScmDatanodeProtocl RPC " +
-        "server", getDatanodeProtocolServer().getDatanodeRpcAddress()));
-    getDatanodeProtocolServer().start();
-
-    httpServer.start();
-    scmBlockManager.start();
-    replicationStatus.start();
-    replicationManager.start();
-    setStartTime();
-  }
-
-  /**
-   * Stop service.
-   */
-  public void stop() {
-
-    try {
-      LOG.info("Stopping Replication Activity Status tracker.");
-      replicationStatus.close();
-    } catch (Exception ex) {
-      LOG.error("Replication Activity Status tracker stop failed.", ex);
-    }
-
-
-    try {
-      LOG.info("Stopping Replication Manager Service.");
-      replicationManager.stop();
-    } catch (Exception ex) {
-      LOG.error("Replication manager service stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping Lease Manager of the command watchers");
-      commandWatcherLeaseManager.shutdown();
-    } catch (Exception ex) {
-      LOG.error("Lease Manager of the command watchers stop failed");
-    }
-
-    try {
-      LOG.info("Stopping datanode service RPC server");
-      getDatanodeProtocolServer().stop();
-
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager datanode RPC stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping block service RPC server");
-      getBlockProtocolServer().stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager blockRpcServer stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping the StorageContainerLocationProtocol RPC server");
-      getClientProtocolServer().stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager clientRpcServer stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping Storage Container Manager HTTP server.");
-      httpServer.stop();
-    } catch (Exception ex) {
-      LOG.error("Storage Container Manager HTTP server stop failed.", ex);
-    }
-
-    try {
-      LOG.info("Stopping Block Manager Service.");
-      scmBlockManager.stop();
-    } catch (Exception ex) {
-      LOG.error("SCM block manager service stop failed.", ex);
-    }
-
-    if (containerReportCache != null) {
-      containerReportCache.invalidateAll();
-      containerReportCache.cleanUp();
-    }
-
-    if (metrics != null) {
-      metrics.unRegister();
-    }
-
-    unregisterMXBean();
-    // Event queue must be stopped before the DB store is closed at the end.
-    try {
-      LOG.info("Stopping SCM Event Queue.");
-      eventQueue.close();
-    } catch (Exception ex) {
-      LOG.error("SCM Event Queue stop failed", ex);
-    }
-    IOUtils.cleanupWithLogger(LOG, scmContainerManager);
-  }
-
-  /**
-   * Wait until service has completed shutdown.
-   */
-  public void join() {
-    try {
-      getBlockProtocolServer().join();
-      getClientProtocolServer().join();
-      getDatanodeProtocolServer().join();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.info("Interrupted during StorageContainerManager join.");
-    }
-  }
-
-  /**
-   * Returns the Number of Datanodes that are communicating with SCM.
-   *
-   * @param nodestate Healthy, Dead etc.
-   * @return int -- count
-   */
-  public int getNodeCount(NodeState nodestate) {
-    return scmNodeManager.getNodeCount(nodestate);
-  }
-
-  /**
-   * Returns SCM container manager.
-   */
-  @VisibleForTesting
-  public Mapping getScmContainerManager() {
-    return scmContainerManager;
-  }
-
-  /**
-   * Returns node manager.
-   *
-   * @return - Node Manager
-   */
-  @VisibleForTesting
-  public NodeManager getScmNodeManager() {
-    return scmNodeManager;
-  }
-
-  @VisibleForTesting
-  public BlockManager getScmBlockManager() {
-    return scmBlockManager;
-  }
-
-  public void checkAdminAccess(String remoteUser) throws IOException {
-    if (remoteUser != null) {
-      if (!scmAdminUsernames.contains(remoteUser)) {
-        throw new IOException(
-            "Access denied for user " + remoteUser + ". Superuser privilege " +
-                "is required.");
-      }
-    }
-  }
-
-  /**
-   * Invalidate container stat entry for given datanode.
-   *
-   * @param datanodeUuid
-   */
-  public void removeContainerReport(String datanodeUuid) {
-    synchronized (containerReportCache) {
-      containerReportCache.invalidate(datanodeUuid);
-    }
-  }
-
-  /**
-   * Get container stat of specified datanode.
-   *
-   * @param datanodeUuid
-   * @return
-   */
-  public ContainerStat getContainerReport(String datanodeUuid) {
-    ContainerStat stat = null;
-    synchronized (containerReportCache) {
-      stat = containerReportCache.getIfPresent(datanodeUuid);
-    }
-
-    return stat;
-  }
-
-  /**
-   * Returns a view of the container stat entries. Modifications made to the
-   * map will directly
-   * affect the cache.
-   *
-   * @return
-   */
-  public ConcurrentMap<String, ContainerStat> getContainerReportCache() {
-    return containerReportCache.asMap();
-  }
-
-  @Override
-  public Map<String, String> getContainerReport() {
-    Map<String, String> id2StatMap = new HashMap<>();
-    synchronized (containerReportCache) {
-      ConcurrentMap<String, ContainerStat> map = containerReportCache.asMap();
-      for (Map.Entry<String, ContainerStat> entry : map.entrySet()) {
-        id2StatMap.put(entry.getKey(), entry.getValue().toJsonString());
-      }
-    }
-
-    return id2StatMap;
-  }
-
-  public boolean isInChillMode() {
-    return scmChillModeManager.getInChillMode();
-  }
-
-  /**
-   * Returns EventPublisher.
-   */
-  public EventPublisher getEventQueue(){
-    return eventQueue;
-  }
-
-  /**
-   * Force SCM out of chill mode.
-   */
-  public boolean exitChillMode() {
-    scmChillModeManager.exitChillMode(eventQueue);
-    return true;
-  }
-
-  @VisibleForTesting
-  public double getCurrentContainerThreshold() {
-    return scmChillModeManager.getCurrentContainerThreshold();
-  }
-
-  /**
-   * Startup options.
-   */
-  public enum StartupOption {
-    INIT("-init"),
-    CLUSTERID("-clusterid"),
-    GENCLUSTERID("-genclusterid"),
-    REGULAR("-regular"),
-    HELP("-help");
-
-    private final String name;
-    private String clusterId = null;
-
-    StartupOption(String arg) {
-      this.name = arg;
-    }
-
-    public String getClusterId() {
-      return clusterId;
-    }
-
-    public void setClusterId(String cid) {
-      if (cid != null && !cid.isEmpty()) {
-        clusterId = cid;
-      }
-    }
-
-    public String getName() {
-      return name;
-    }
-  }
-}

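The constructor removed above wires every SCM subsystem through a single EventQueue: each SCMEvents constant is bound to a dedicated handler via addHandler(), and producers publish payloads with fireEvent(). The following self-contained Java sketch illustrates that typed publish/subscribe pattern; TypedEvent, EventHandler, SimpleEventQueue, and the demo event are hypothetical stand-ins, not the real HDDS classes.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical stand-ins for the HDDS event types; illustration only.
final class TypedEvent<PAYLOAD> {
  private final String name;
  TypedEvent(String name) { this.name = name; }
  @Override public String toString() { return name; }
}

interface EventHandler<PAYLOAD> {
  void onMessage(PAYLOAD payload);
}

// Minimal queue: handlers register per event, fireEvent fans the payload out.
final class SimpleEventQueue {
  private final Map<TypedEvent<?>, List<EventHandler<?>>> handlers =
      new HashMap<>();

  <P> void addHandler(TypedEvent<P> event, EventHandler<P> handler) {
    handlers.computeIfAbsent(event, e -> new ArrayList<>()).add(handler);
  }

  @SuppressWarnings("unchecked")
  <P> void fireEvent(TypedEvent<P> event, P payload) {
    for (EventHandler<?> handler : handlers.getOrDefault(event, List.of())) {
      ((EventHandler<P>) handler).onMessage(payload);
    }
  }
}

// Usage mirroring the deleted wiring: one event type, one dedicated handler.
class EventQueueDemo {
  static final TypedEvent<String> NODE_REPORT = new TypedEvent<>("NodeReport");

  public static void main(String[] args) {
    SimpleEventQueue queue = new SimpleEventQueue();
    queue.addHandler(NODE_REPORT, report ->
        System.out.println("Handling node report: " + report));
    queue.fireEvent(NODE_REPORT, "datanode-1: 3 containers");
  }
}

Registering handlers against event objects rather than raw types keeps the wiring explicit, which is why the whole event topology is auditable from the one constructor above.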
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
deleted file mode 100644
index 75b2036..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-
-import java.io.IOException;
-
-/**
- * HttpServer2 wrapper for the Ozone Storage Container Manager.
- */
-public class StorageContainerManagerHttpServer extends BaseHttpServer {
-
-  public StorageContainerManagerHttpServer(Configuration conf)
-      throws IOException {
-    super(conf, "scm");
-  }
-
-  @Override protected String getHttpAddressKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpBindHostKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override protected String getHttpsAddressKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpsBindHostKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override protected String getBindHostDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override protected int getHttpBindPortDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected int getHttpsBindPortDefault() {
-    return ScmConfigKeys.OZONE_SCM_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected String getKeytabFile() {
-    return ScmConfigKeys.OZONE_SCM_KEYTAB_FILE;
-  }
-
-  @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
-  }
-
-  @Override protected String getEnabledKey() {
-    return ScmConfigKeys.OZONE_SCM_HTTP_ENABLED_KEY;
-  }
-
-}

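StorageContainerManagerHttpServer, removed above, holds no server logic of its own: it only overrides getters that tell the shared BaseHttpServer which configuration keys to read. A minimal sketch of that template-method arrangement, using hypothetical BaseServer/ScmServer names and key strings rather than the real BaseHttpServer API:

import java.util.Map;

// The base class drives setup; subclasses only supply their config keys.
abstract class BaseServer {
  private final String bindHost;
  private final int port;

  BaseServer(Map<String, String> conf) {
    // Configuration is read through the subclass hooks.
    this.bindHost = conf.getOrDefault(getBindHostKey(), "0.0.0.0");
    this.port = Integer.parseInt(conf.getOrDefault(getPortKey(), "9876"));
  }

  protected abstract String getBindHostKey();
  protected abstract String getPortKey();

  void start() {
    System.out.println("Listening on " + bindHost + ":" + port);
  }
}

class ScmServer extends BaseServer {
  ScmServer(Map<String, String> conf) { super(conf); }

  @Override protected String getBindHostKey() {
    return "ozone.scm.http.bind.host"; // hypothetical key for this sketch
  }

  @Override protected String getPortKey() {
    return "ozone.scm.http.port"; // hypothetical key for this sketch
  }

  public static void main(String[] args) {
    new ScmServer(Map.of("ozone.scm.http.port", "9877")).start();
  }
}

The payoff is that each Ozone service can share one HTTP server implementation and differ only in the handful of keys its subclass reports.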
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
deleted file mode 100644
index fe07272..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license
- * agreements. See the NOTICE file distributed with this work for additional
- * information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache
- * License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the
- * License. You may obtain a
- * copy of the License at
- *
- * <p>http://www.apache.org/licenses/LICENSE-2.0
- *
- * <p>Unless required by applicable law or agreed to in writing, software
- * distributed under the
- * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.server;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java
deleted file mode 100644
index 2a50bca..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/RetriableDatanodeEventWatcher.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;
-
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.CommandStatusEvent;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.server.events.Event;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.EventWatcher;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * EventWatcher for start events and completion events with payload of type
- * CommandForDatanode and CommandStatusEvent respectively.
- */
-public class RetriableDatanodeEventWatcher<T extends CommandStatusEvent>
-    extends EventWatcher<CommandForDatanode, T> {
-
-  public static final Logger LOG =
-      LoggerFactory.getLogger(RetriableDatanodeEventWatcher.class);
-
-  public RetriableDatanodeEventWatcher(Event<CommandForDatanode> startEvent,
-      Event<T> completionEvent, LeaseManager<Long> leaseManager) {
-    super(startEvent, completionEvent, leaseManager);
-  }
-
-  @Override
-  protected void onTimeout(EventPublisher publisher,
-      CommandForDatanode payload) {
-    LOG.info("RetriableDatanodeCommand type={} with id={} timed out. 
Retrying.",
-        payload.getCommand().getType(), payload.getId());
-    //put back to the original queue
-    publisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND, payload);
-  }
-
-  @Override
-  protected void onFinished(EventPublisher publisher,
-      CommandForDatanode payload) {
-
-  }
-}

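RetriableDatanodeEventWatcher, removed above, pairs a start event (RETRIABLE_DATANODE_COMMAND) with a completion event under a LeaseManager lease; if the completion never arrives, onTimeout() fires the command back onto the original queue. A rough, self-contained sketch of that retry contract, assuming a plain ScheduledExecutorService in place of the real EventWatcher/LeaseManager machinery:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: remember each command when it starts, forget it on
// completion, and re-submit it if the lease expires first.
final class RetryWatcher {
  private final Map<Long, String> pending = new ConcurrentHashMap<>();
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();
  private final long timeoutMillis;

  RetryWatcher(long timeoutMillis) {
    this.timeoutMillis = timeoutMillis;
  }

  void onStart(long id, String command) {
    pending.put(id, command);
    scheduler.schedule(() -> {
      String cmd = pending.remove(id);
      if (cmd != null) { // lease expired before a completion event arrived
        System.out.println("Command " + id + " timed out, retrying: " + cmd);
        onStart(id, cmd); // put back on the queue, as onTimeout() does above
      }
    }, timeoutMillis, TimeUnit.MILLISECONDS);
  }

  void onCompletion(long id) {
    pending.remove(id); // completion observed in time; the retry is cancelled
  }

  public static void main(String[] args) throws InterruptedException {
    RetryWatcher watcher = new RetryWatcher(100);
    watcher.onStart(1L, "closeContainer");
    Thread.sleep(250); // no completion arrives, so the command is retried
    watcher.onCompletion(1L);
    watcher.scheduler.shutdown();
  }
}

Note that the empty onFinished() in the deleted class fits this shape: completion only needs to clear the pending entry, which the base machinery already does.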
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
deleted file mode 100644
index b1d2838..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocol.commands;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
deleted file mode 100644
index 2c943b6..0000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/index.html
+++ /dev/null
@@ -1,76 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd";>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
-    <meta name="description" content="HDFS Storage Container Manager">
-
-    <title>HDFS Storage Container Manager</title>
-
-    <link href="static/bootstrap-3.3.7/css/bootstrap.min.css" rel="stylesheet">
-    <link href="static/hadoop.css" rel="stylesheet">
-    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
-
-    <link href="static/ozone.css" rel="stylesheet">
-
-</head>
-
-<body ng-app="scm">
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed" 
data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">HDFS SCM</a>
-        </div>
-
-
-        <navmenu
-                metrics="{ 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
-
-
-    </div>
-</header>
-
-<div class="container-fluid" style="margin: 12pt">
-
-    <ng-view></ng-view>
-
-</div><!-- /.container -->
-
-<script src="static/jquery-3.3.1.min.js"></script>
-<script src="static/angular-1.6.4.min.js"></script>
-<script src="static/angular-route-1.6.4.min.js"></script>
-<script src="static/d3-3.5.17.min.js"></script>
-<script src="static/nvd3-1.8.5.min.js"></script>
-<script src="static/angular-nvd3-1.0.9.min.js"></script>
-<script src="static/ozone.js"></script>
-<script src="scm.js"></script>
-<script src="static/bootstrap-3.3.7/js/bootstrap.min.js"></script>
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html
deleted file mode 100644
index 2666f81..0000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/main.html
+++ /dev/null
@@ -1,20 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<overview>
-    <scm-overview>
-    </scm-overview>
-</overview>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
deleted file mode 100644
index fca23ba..0000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm-overview.html
+++ /dev/null
@@ -1,60 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h2>Node counts</h2>
-
-<table class="table table-bordered table-striped" class="col-md-6">
-    <tbody>
-    <tr ng-repeat="typestat in $ctrl.nodemanagermetrics.NodeCount | orderBy:'key':false:$ctrl.nodeOrder">
-        <td>{{typestat.key}}</td>
-        <td>{{typestat.value}}</td>
-    </tr>
-    </tbody>
-</table>
-
-<h2>Status</h2>
-<table class="table table-bordered table-striped" class="col-md-6">
-    <tbody>
-    <tr>
-        <td>Client Rpc port</td>
-        <td>{{$ctrl.overview.jmx.ClientRpcPort}}</td>
-    </tr>
-    <tr>
-        <td>Datanode Rpc port</td>
-        <td>{{$ctrl.overview.jmx.DatanodeRpcPort}}</td>
-    </tr>
-    <tr>
-        <td>Block Manager: Open containers</td>
-        <td>{{$ctrl.blockmanagermetrics.OpenContainersNo}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Minimum chill mode nodes</td>
-        <td>{{$ctrl.nodemanagermetrics.MinimumChillModeNodes}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Out-of-node chill mode</td>
-        <td>{{$ctrl.nodemanagermetrics.OutOfNodeChillMode}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Chill mode status</td>
-        <td>{{$ctrl.nodemanagermetrics.ChillModeStatus}}</td>
-    </tr>
-    <tr>
-        <td>Node Manager: Manual chill mode</td>
-        <td>{{$ctrl.nodemanagermetrics.InManualChillMode}}</td>
-    </tr>
-    </tbody>
-</table>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
deleted file mode 100644
index bcfa8b7..0000000
--- a/hadoop-hdds/server-scm/src/main/resources/webapps/scm/scm.js
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-    angular.module('scm', ['ozone', 'nvd3']);
-
-    angular.module('scm').component('scmOverview', {
-        templateUrl: 'scm-overview.html',
-        require: {
-            overview: "^overview"
-        },
-        controller: function ($http) {
-            var ctrl = this;
-            $http.get("jmx?qry=Hadoop:service=BlockManager,name=*")
-                .then(function (result) {
-                    ctrl.blockmanagermetrics = result.data.beans[0];
-                });
-            $http.get("jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo")
-                .then(function (result) {
-                    ctrl.nodemanagermetrics = result.data.beans[0];
-                });
-
-            var statusSortOrder = {
-                "HEALTHY": "a",
-                "STALE": "b",
-                "DEAD": "c",
-                "UNKNOWN": "z",
-                "DECOMMISSIONING": "x",
-                "DECOMMISSIONED": "y"
-            };
-            ctrl.nodeOrder = function (v1, v2) {
-                //status with non defined sort order will be "undefined"
-                return ("" + statusSortOrder[v1.value]).localeCompare("" + 
statusSortOrder[v2.value])
-            }
-
-        }
-    });
-
-})();

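The nodeOrder function in the deleted scm.js ranks node states by operational severity instead of alphabetically, by routing each state through the statusSortOrder lookup table before comparing. The same idea in Java, as an illustration only (the state list mirrors the table above, but the "~" fallback key that pushes unrecognized states to the end is a choice made for this sketch):

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Map;

class NodeStateOrder {
  // Same severity ranking as the statusSortOrder table in scm.js.
  private static final Map<String, String> ORDER = Map.of(
      "HEALTHY", "a", "STALE", "b", "DEAD", "c",
      "DECOMMISSIONING", "x", "DECOMMISSIONED", "y", "UNKNOWN", "z");

  public static void main(String[] args) {
    List<String> states =
        Arrays.asList("DEAD", "HEALTHY", "SOMETHING_ELSE", "STALE");
    // States missing from the table get "~", which sorts after a-z.
    states.sort(Comparator.comparing((String s) -> ORDER.getOrDefault(s, "~")));
    System.out.println(states); // [HEALTHY, STALE, DEAD, SOMETHING_ELSE]
  }
}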
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java
deleted file mode 100644
index 6e01e53..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
-
-import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-
-import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test the HDDS server side utilities.
- */
-public class HddsServerUtilTest {
-
-  @Rule
-  public Timeout timeout = new Timeout(300000);
-
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  /**
-   * Verify DataNode endpoint lookup failure if neither the client nor
-   * datanode endpoint are configured.
-   */
-  @Test
-  public void testMissingScmDataNodeAddress() {
-    final Configuration conf = new OzoneConfiguration();
-    thrown.expect(IllegalArgumentException.class);
-    HddsServerUtil.getScmAddressForDataNodes(conf);
-  }
-
-  /**
-   * Verify that the datanode endpoint is parsed correctly.
-   * This tests the logic used by the DataNodes to determine which address
-   * to connect to.
-   */
-  @Test
-  public void testGetScmDataNodeAddress() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // First try a client address with just a host name. Verify it falls
-    // back to the default port.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // Next try a client address with just a host name and port.
-    // Verify the port is ignored and the default DataNode port is used.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    addr = HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // Set both OZONE_SCM_CLIENT_ADDRESS_KEY and
-    // OZONE_SCM_DATANODE_ADDRESS_KEY.
-    // Verify that the latter overrides and the port number is still the
-    // default.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8");
-    addr =
-        HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // Set both OZONE_SCM_CLIENT_ADDRESS_KEY and
-    // OZONE_SCM_DATANODE_ADDRESS_KEY.
-    // Verify that the latter overrides and the port number from the latter is
-    // used.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8:200");
-    addr = HddsServerUtil.getScmAddressForDataNodes(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(200));
-  }
-
-  /**
-   * Verify that the client endpoint bind address is computed correctly.
-   * This tests the logic used by the SCM to determine its own bind address.
-   */
-  @Test
-  public void testScmClientBindHostDefault() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY
-    // is set differently.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = HddsServerUtil.getScmClientBindAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(),
-        is(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-
-    // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY
-    // is set differently. The port number from OZONE_SCM_CLIENT_ADDRESS_KEY
-    // should be respected.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200");
-    addr = HddsServerUtil.getScmClientBindAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(100));
-
-    // OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected.
-    // Port number should be default if none is specified via
-    // OZONE_SCM_DATANODE_ADDRESS_KEY.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4");
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8");
-    addr = HddsServerUtil.getScmClientBindAddress(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-
-    // OZONE_SCM_CLIENT_BIND_HOST_KEY should be respected.
-    // Port number from OZONE_SCM_CLIENT_ADDRESS_KEY should be
-    // respected.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200");
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8");
-    addr = HddsServerUtil.getScmClientBindAddress(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(100));
-  }
-
-  /**
-   * Verify that the DataNode endpoint bind address is computed correctly.
-   * This tests the logic used by the SCM to determine its own bind address.
-   */
-  @Test
-  public void testScmDataNodeBindHostDefault() {
-    final Configuration conf = new OzoneConfiguration();
-
-    // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY
-    // is set differently.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = HddsServerUtil.getScmDataNodeBindAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY
-    // is set differently. The port number from OZONE_SCM_DATANODE_ADDRESS_KEY
-    // should be respected.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200");
-    addr = HddsServerUtil.getScmDataNodeBindAddress(conf);
-    assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(200));
-
-    // OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected.
-    // Port number should be default if none is specified via
-    // OZONE_SCM_DATANODE_ADDRESS_KEY.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8");
-    addr = HddsServerUtil.getScmDataNodeBindAddress(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(
-        ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    // OZONE_SCM_DATANODE_BIND_HOST_KEY should be respected.
-    // Port number from OZONE_SCM_DATANODE_ADDRESS_KEY should be
-    // respected.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200");
-    conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8");
-    addr = HddsServerUtil.getScmDataNodeBindAddress(conf);
-    assertThat(addr.getHostString(), is("5.6.7.8"));
-    assertThat(addr.getPort(), is(200));
-  }
-
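-  /**
-   * Verify that OZONE_SCM_NAMES is parsed correctly: the default port is
-   * applied, explicit ports are honored, whitespace is tolerated, and
-   * invalid values are rejected with IllegalArgumentException.
-   */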
-  @Test
-  public void testGetSCMAddresses() {
-    final Configuration conf = new OzoneConfiguration();
-    Collection<InetSocketAddress> addresses = null;
-    InetSocketAddress addr = null;
-    Iterator<InetSocketAddress> it = null;
-
-    // Verify valid IP address setup
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "1.2.3.4");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(1));
-    addr = addresses.iterator().next();
-    assertThat(addr.getHostName(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT));
-
-    // Verify valid hostname setup
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(1));
-    addr = addresses.iterator().next();
-    assertThat(addr.getHostName(), is("scm1"));
-    assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_DEFAULT_PORT));
-
-    // Verify valid hostname and port
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(1));
-    addr = addresses.iterator().next();
-    assertThat(addr.getHostName(), is("scm1"));
-    assertThat(addr.getPort(), is(1234));
-
-    final HashMap<String, Integer> hostsAndPorts = new HashMap<>();
-    hostsAndPorts.put("scm1", 1234);
-    hostsAndPorts.put("scm2", 2345);
-    hostsAndPorts.put("scm3", 3456);
-
-    // Verify multiple hosts and ports
-    conf.setStrings(
-        ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234,scm2:2345,scm3:3456");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(3));
-    it = addresses.iterator();
-    HashMap<String, Integer> expected1 = new HashMap<>(hostsAndPorts);
-    while (it.hasNext()) {
-      InetSocketAddress current = it.next();
-      assertTrue(expected1.remove(current.getHostName(),
-          current.getPort()));
-    }
-    assertTrue(expected1.isEmpty());
-
-    // Verify names with spaces
-    conf.setStrings(
-        ScmConfigKeys.OZONE_SCM_NAMES, " scm1:1234, scm2:2345 , scm3:3456 ");
-    addresses = getSCMAddresses(conf);
-    assertThat(addresses.size(), is(3));
-    it = addresses.iterator();
-    HashMap<String, Integer> expected2 = new HashMap<>(hostsAndPorts);
-    while (it.hasNext()) {
-      InetSocketAddress current = it.next();
-      assertTrue(expected2.remove(current.getHostName(),
-          current.getPort()));
-    }
-    assertTrue(expected2.isEmpty());
-
-    // Verify empty value
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "");
-    try {
-      addresses = getSCMAddresses(conf);
-      fail("Empty value should cause an IllegalArgumentException");
-    } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-    }
-
-    // Verify invalid hostname
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "s..x..:1234");
-    try {
-      addresses = getSCMAddresses(conf);
-      fail("An invalid hostname should cause an IllegalArgumentException");
-    } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-    }
-
-    // Verify invalid port
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm:xyz");
-    try {
-      addresses = getSCMAddresses(conf);
-      fail("An invalid port should cause an IllegalArgumentException");
-    } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-    }
-
-    // Verify a mixed case (both valid and invalid values appear)
-    conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, "scm1:1234, scm:xyz");
-    try {
-      addresses = getSCMAddresses(conf);
-      fail("An invalid value should cause an IllegalArgumentException");
-    } catch (Exception e) {
-      assertTrue(e instanceof IllegalArgumentException);
-    }
-  }
-
-}
\ No newline at end of file
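
For context, the fallback behavior the deleted tests pin down can be read
from a short sketch. This is not the HddsServerUtil implementation removed
by this commit, only a minimal illustration consistent with the assertions
above; the class and method names here are hypothetical.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class ScmAddressSketch {
  private ScmAddressSketch() { }

  // Resolve the SCM address a DataNode should connect to:
  // OZONE_SCM_DATANODE_ADDRESS_KEY wins if set; otherwise the host of
  // OZONE_SCM_CLIENT_ADDRESS_KEY is reused. Only a port carried by the
  // datanode key is honored; anything else falls back to the default,
  // which is why the tests expect OZONE_SCM_DATANODE_PORT_DEFAULT when
  // only the client address carries a port.
  public static InetSocketAddress resolveScmAddressForDataNodes(
      Configuration conf) {
    String dn = conf.getTrimmed(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY);
    String client = conf.getTrimmed(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
    String target = (dn != null && !dn.isEmpty()) ? dn : client;
    if (target == null || target.isEmpty()) {
      throw new IllegalArgumentException(
          "Neither the client nor the datanode SCM address is configured.");
    }
    String host = target.split(":", 2)[0];
    int port = ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
    if (dn != null && dn.contains(":")) {
      port = Integer.parseInt(dn.split(":", 2)[1]);
    }
    return InetSocketAddress.createUnresolved(host, port);
  }
}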

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
deleted file mode 100644
index 50d1eed..0000000
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import java.util.ArrayList;
-import java.util.List;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer
-    .NodeRegistrationContainerReport;
-
-/**
- * Stateless helper functions for Hdds tests.
- */
-public final class HddsTestUtils {
-
-  private HddsTestUtils() {
-  }
-
-  /**
-   * Create a NodeRegistrationContainerReport with randomly generated
-   * container reports.
-   *
-   * @param numOfContainers number of containers to be included in the report.
-   * @return NodeRegistrationContainerReport
-   */
-  public static NodeRegistrationContainerReport
-      createNodeRegistrationContainerReport(int numOfContainers) {
-    return new NodeRegistrationContainerReport(
-        TestUtils.randomDatanodeDetails(),
-        TestUtils.getRandomContainerReports(numOfContainers));
-  }
-
-  /**
-   * Create a NodeRegistrationContainerReport for the given containers.
-   *
-   * @param dnContainers list of containers to be included in the report.
-   * @return NodeRegistrationContainerReport
-   */
-  public static NodeRegistrationContainerReport
-      createNodeRegistrationContainerReport(List<ContainerInfo> dnContainers) {
-    List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
-        containers = new ArrayList<>();
-    dnContainers.forEach(c -> {
-      containers.add(TestUtils.getRandomContainerInfo(c.getContainerID()));
-    });
-    return new NodeRegistrationContainerReport(
-        TestUtils.randomDatanodeDetails(),
-        TestUtils.getContainerReports(containers));
-  }
-
-  /**
-   * Creates a list of ContainerInfo objects with random container IDs.
-   *
-   * @param numContainers number of ContainerInfo objects to include.
-   * @return list of generated ContainerInfo objects
-   */
-  public static List<ContainerInfo> getContainerInfo(int numContainers) {
-    List<ContainerInfo> containerInfoList = new ArrayList<>();
-    for (int i = 0; i < numContainers; i++) {
-      ContainerInfo.Builder builder = new ContainerInfo.Builder();
-      containerInfoList.add(builder
-          .setContainerID(RandomUtils.nextLong())
-          .build());
-    }
-    return containerInfoList;
-  }
-
-}
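
A typical use of these helpers in an SCM test could look like the sketch
below. Only the HddsTestUtils calls come from the file above; the
surrounding scaffolding is illustrative.

import java.util.List;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport;

public final class HddsTestUtilsUsage {
  public static void main(String[] args) {
    // A registration report carrying five randomly generated containers.
    NodeRegistrationContainerReport random =
        HddsTestUtils.createNodeRegistrationContainerReport(5);

    // Or build the container list first, then report exactly those IDs.
    List<ContainerInfo> containers = HddsTestUtils.getContainerInfo(3);
    NodeRegistrationContainerReport explicit =
        HddsTestUtils.createNodeRegistrationContainerReport(containers);

    System.out.println(random + " / " + explicit);
  }
}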

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
deleted file mode 100644
index d9e1425..0000000
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpConfig.Policy;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.net.URLConnection;
-import java.util.Arrays;
-import java.util.Collection;
-
-/**
- * Test the HTTP server of SCM with various HTTP policies.
- */
-@RunWith(value = Parameterized.class)
-public class TestStorageContainerManagerHttpServer {
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName());
-  private static String keystoresDir;
-  private static String sslConfDir;
-  private static Configuration conf;
-  private static URLConnectionFactory connectionFactory;
-
-  @Parameters public static Collection<Object[]> policy() {
-    Object[][] params = new Object[][] {
-        {HttpConfig.Policy.HTTP_ONLY},
-        {HttpConfig.Policy.HTTPS_ONLY},
-        {HttpConfig.Policy.HTTP_AND_HTTPS} };
-    return Arrays.asList(params);
-  }
-
-  private final HttpConfig.Policy policy;
-
-  public TestStorageContainerManagerHttpServer(Policy policy) {
-    this.policy = policy;
-  }
-
-  @BeforeClass public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-    conf = new Configuration();
-    keystoresDir = new File(BASEDIR).getAbsolutePath();
-    sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        TestStorageContainerManagerHttpServer.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
-    connectionFactory =
-        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getClientSSLConfigFileName());
-    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getServerSSLConfigFileName());
-  }
-
-  @AfterClass public static void tearDown() throws Exception {
-    FileUtil.fullyDelete(new File(BASEDIR));
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-  }
-
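-  /**
-   * Start the SCM HTTP server under the current policy and verify that
-   * exactly the enabled endpoints (HTTP and/or HTTPS) are served.
-   */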
-  @Test public void testHttpPolicy() throws Exception {
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
-
-    InetSocketAddress.createUnresolved("localhost", 0);
-    StorageContainerManagerHttpServer server = null;
-    try {
-      server = new StorageContainerManagerHttpServer(conf);
-      server.start();
-
-      Assert.assertTrue(implies(policy.isHttpEnabled(),
-          canAccess("http", server.getHttpAddress())));
-      Assert.assertTrue(
-          implies(!policy.isHttpEnabled(), server.getHttpAddress() == null));
-
-      Assert.assertTrue(implies(policy.isHttpsEnabled(),
-          canAccess("https", server.getHttpsAddress())));
-      Assert.assertTrue(
-          implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null));
-
-    } finally {
-      if (server != null) {
-        server.stop();
-      }
-    }
-  }
-
-  private static boolean canAccess(String scheme, InetSocketAddress addr) {
-    if (addr == null) {
-      return false;
-    }
-    try {
-      URL url =
-          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
-      URLConnection conn = connectionFactory.openConnection(url);
-      conn.connect();
-      conn.getContent();
-    } catch (IOException e) {
-      return false;
-    }
-    return true;
-  }
-
-  private static boolean implies(boolean a, boolean b) {
-    return !a || b;
-  }
-}
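
Every assertion in testHttpPolicy reduces to material implication via the
implies helper: an enabled endpoint must be reachable, and a disabled one
must publish no address. A self-contained sketch of that pattern follows;
the values here are placeholders, not real server state.

public final class PolicyCheckSketch {
  // Material implication, as in the test helper above: a implies b.
  private static boolean implies(boolean a, boolean b) {
    return !a || b;
  }

  public static void main(String[] args) {
    boolean httpEnabled = true;
    boolean httpReachable = true;   // stand-in for canAccess("http", addr)
    boolean httpsEnabled = false;
    Object httpsAddress = null;     // stand-in for server.getHttpsAddress()

    // If HTTP is enabled, its endpoint must answer; if HTTPS is
    // disabled, no HTTPS address may be published.
    System.out.println(implies(httpEnabled, httpReachable));          // true
    System.out.println(implies(!httpsEnabled, httpsAddress == null)); // true
  }
}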

