http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java
deleted file mode 100644
index cd25548..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/TestRDBTableStore.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.utils.db;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.DBOptions;
-import org.rocksdb.RocksDB;
-import org.rocksdb.Statistics;
-import org.rocksdb.StatsLevel;
-import org.rocksdb.WriteBatch;
-
-import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Tests for RocksDBTable Store.
- */
-public class TestRDBTableStore {
-  private static int count = 0;
-  private final List<String> families =
-      Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
-          "First", "Second", "Third",
-          "Fourth", "Fifth",
-          "Sixth");
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-  private RDBStore rdbStore = null;
-  private DBOptions options = null;
-
-  @Before
-  public void setUp() throws Exception {
-    options = new DBOptions();
-    options.setCreateIfMissing(true);
-    options.setCreateMissingColumnFamilies(true);
-
-    Statistics statistics = new Statistics();
-    statistics.setStatsLevel(StatsLevel.ALL);
-    options = options.setStatistics(statistics);
-
-    Set<TableConfig> configSet = new HashSet<>();
-    for(String name : families) {
-      TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
-      configSet.add(newConfig);
-    }
-    rdbStore = new RDBStore(folder.newFolder(), options, configSet);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (rdbStore != null) {
-      rdbStore.close();
-    }
-  }
-
-  @Test
-  public void toIOException() {
-  }
-
-  @Test
-  public void getHandle() throws Exception {
-    try (Table testTable = rdbStore.getTable("First")) {
-      Assert.assertNotNull(testTable);
-      Assert.assertNotNull(testTable.getHandle());
-    }
-  }
-
-  @Test
-  public void putGetAndEmpty() throws Exception {
-    try (Table testTable = rdbStore.getTable("First")) {
-      byte[] key =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      byte[] value =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      testTable.put(key, value);
-      Assert.assertFalse(testTable.isEmpty());
-      byte[] readValue = testTable.get(key);
-      Assert.assertArrayEquals(value, readValue);
-    }
-    try (Table secondTable = rdbStore.getTable("Second")) {
-      Assert.assertTrue(secondTable.isEmpty());
-    }
-  }
-
-  @Test
-  public void delete() throws Exception {
-    List<byte[]> deletedKeys = new LinkedList<>();
-    List<byte[]> validKeys = new LinkedList<>();
-    byte[] value =
-        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-    for (int x = 0; x < 100; x++) {
-      deletedKeys.add(
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
-    }
-
-    for (int x = 0; x < 100; x++) {
-      validKeys.add(
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
-    }
-
-    // Write all the keys and delete the keys scheduled for delete.
-    //Assert we find only expected keys in the Table.
-    try (Table testTable = rdbStore.getTable("Fourth")) {
-      for (int x = 0; x < deletedKeys.size(); x++) {
-        testTable.put(deletedKeys.get(x), value);
-        testTable.delete(deletedKeys.get(x));
-      }
-
-      for (int x = 0; x < validKeys.size(); x++) {
-        testTable.put(validKeys.get(x), value);
-      }
-
-      for (int x = 0; x < validKeys.size(); x++) {
-        Assert.assertNotNull(testTable.get(validKeys.get(x)));
-      }
-
-      for (int x = 0; x < deletedKeys.size(); x++) {
-        Assert.assertNull(testTable.get(deletedKeys.get(x)));
-      }
-    }
-  }
-
-  @Test
-  public void writeBatch() throws Exception {
-    WriteBatch batch = new WriteBatch();
-    try (Table testTable = rdbStore.getTable("Fifth")) {
-      byte[] key =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      byte[] value =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      batch.put(testTable.getHandle(), key, value);
-      testTable.writeBatch(batch);
-      Assert.assertNotNull(testTable.get(key));
-    }
-    batch.close();
-  }
-
-  private static boolean consume(Table.KeyValue keyValue) {
-    count++;
-    Assert.assertNotNull(keyValue.getKey());
-    return true;
-  }
-
-  @Test
-  public void forEachAndIterator() throws Exception {
-    final int iterCount = 100;
-    try (Table testTable = rdbStore.getTable("Sixth")) {
-      for (int x = 0; x < iterCount; x++) {
-        byte[] key =
-            RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        byte[] value =
-            RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-        testTable.put(key, value);
-      }
-      int localCount = 0;
-      try (TableIterator<Table.KeyValue> iter = testTable.iterator()) {
-        while (iter.hasNext()) {
-          Table.KeyValue keyValue = iter.next();
-          localCount++;
-        }
-
-        Assert.assertEquals(iterCount, localCount);
-        iter.seekToFirst();
-        iter.forEachRemaining(TestRDBTableStore::consume);
-        Assert.assertEquals(iterCount, count);
-
-      }
-    }
-  }
-}
\ No newline at end of file
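
The deleted test above doubles as the clearest illustration of the RDBStore/Table API that this change removes. Below is a minimal usage sketch, assuming the same constructors and methods the test exercises; the path /tmp/rdb-demo and the column family name "demo" are illustrative only.

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.utils.db.RDBStore;
import org.apache.hadoop.utils.db.Table;
import org.apache.hadoop.utils.db.TableConfig;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

public class RDBStoreSketch {
  public static void main(String[] args) throws Exception {
    DBOptions options = new DBOptions();
    options.setCreateIfMissing(true);
    options.setCreateMissingColumnFamilies(true);

    // Every column family the store should open, including the default one.
    Set<TableConfig> families = new HashSet<>();
    families.add(new TableConfig(
        DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyOptions()));
    families.add(new TableConfig("demo", new ColumnFamilyOptions()));

    RDBStore store = new RDBStore(new File("/tmp/rdb-demo"), options, families);
    try (Table table = store.getTable("demo")) {
      byte[] key = "key".getBytes(StandardCharsets.UTF_8);
      byte[] value = "value".getBytes(StandardCharsets.UTF_8);
      table.put(key, value);            // write
      byte[] readBack = table.get(key); // read back the same value
      table.delete(key);                // and remove it again
    } finally {
      store.close();
    }
  }
}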

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/package-info.java
deleted file mode 100644
index f06855e..0000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/db/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-/**
- * Tests for the DB Utilities.
- */
-package org.apache.hadoop.utils.db;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/test/resources/log4j2.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/resources/log4j2.properties b/hadoop-hdds/common/src/test/resources/log4j2.properties
deleted file mode 100644
index cef69e1..0000000
--- a/hadoop-hdds/common/src/test/resources/log4j2.properties
+++ /dev/null
@@ -1,76 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership.  The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# <p>
-# http://www.apache.org/licenses/LICENSE-2.0
-# <p>
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-name=PropertiesConfig
-
-# Checks for config change periodically and reloads
-monitorInterval=5
-
-filter=read, write
-# filter.read.onMatch = DENY avoids logging all READ events
-# filter.read.onMatch = ACCEPT permits logging all READ events
-# The above two settings ignore the log levels in configuration
-# filter.read.onMatch = NEUTRAL permits logging of only those READ events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.read.type = MarkerFilter
-filter.read.marker = READ
-filter.read.onMatch = DENY
-filter.read.onMismatch = NEUTRAL
-
-# filter.write.onMatch = DENY avoids logging all WRITE events
-# filter.write.onMatch = ACCEPT permits logging all WRITE events
-# The above two settings ignore the log levels in configuration
-# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.write.type = MarkerFilter
-filter.write.marker = WRITE
-filter.write.onMatch = NEUTRAL
-filter.write.onMismatch = NEUTRAL
-
-# Log Levels are organized from most specific to least:
-# OFF (most specific, no logging)
-# FATAL (most specific, little data)
-# ERROR
-# WARN
-# INFO
-# DEBUG
-# TRACE (least specific, a lot of data)
-# ALL (least specific, all data)
-
-appenders = console, audit
-appender.console.type = Console
-appender.console.name = STDOUT
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %-5level | %c{1} | %msg%n
-
-appender.audit.type = File
-appender.audit.name = AUDITLOG
-appender.audit.fileName=audit.log
-appender.audit.layout.type=PatternLayout
-appender.audit.layout.pattern= %-5level | %c{1} | %msg%n
-
-loggers=audit
-logger.audit.type=AsyncLogger
-logger.audit.name=OMAudit
-logger.audit.level = INFO
-logger.audit.appenderRefs = audit
-logger.audit.appenderRef.file.ref = AUDITLOG
-
-rootLogger.level = INFO
-rootLogger.appenderRefs = stdout
-rootLogger.appenderRef.stdout.ref = STDOUT
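
The READ/WRITE MarkerFilters and the OMAudit async logger above only take effect for events tagged with the matching Log4j 2 markers. A small sketch of how audit events could be emitted against this configuration, assuming the standard Log4j 2 API; the message layout is illustrative, not Ozone's actual audit format.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

public class AuditMarkerSketch {
  // Logger name matches logger.audit.name=OMAudit in the properties above.
  private static final Logger AUDIT = LogManager.getLogger("OMAudit");
  private static final Marker READ = MarkerManager.getMarker("READ");
  private static final Marker WRITE = MarkerManager.getMarker("WRITE");

  public static void main(String[] args) {
    // filter.write.onMatch=NEUTRAL, so this event is logged at INFO.
    AUDIT.info(WRITE, "user=demo | op=CREATE_VOLUME | status=SUCCESS");
    // filter.read.onMatch=DENY, so this event is dropped regardless of level.
    AUDIT.info(READ, "user=demo | op=INFO_VOLUME | status=SUCCESS");
  }
}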

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/test/resources/test.db.ini
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/resources/test.db.ini b/hadoop-hdds/common/src/test/resources/test.db.ini
deleted file mode 100644
index 6666cd2..0000000
--- a/hadoop-hdds/common/src/test/resources/test.db.ini
+++ /dev/null
@@ -1,145 +0,0 @@
-# This is a RocksDB option file.
-#
-# A typical RocksDB options file has four sections, which are
-# Version section, DBOptions section, at least one CFOptions
-# section, and one TableOptions section for each column family.
-# The RocksDB options file in general follows the basic INI
-# file format with the following extensions / modifications:
-#
-#  * Escaped characters
-#    We escaped the following characters:
-#     - \n -- line feed - new line
-#     - \r -- carriage return
-#     - \\ -- backslash \
-#     - \: -- colon symbol :
-#     - \# -- hash tag #
-#  * Comments
-#    We support # style comments.  Comments can appear at the ending
-#    part of a line.
-#  * Statements
-#    A statement is of the form option_name = value.
-#    Each statement contains a '=', where extra white-spaces
-#    are supported. However, we don't support multi-lined statement.
-#    Furthermore, each line can only contain at most one statement.
-#  * Sections
-#    Sections are of the form [SectionTitle "SectionArgument"],
-#    where section argument is optional.
-#  * List
-#    We use colon-separated string to represent a list.
-#    For instance, n1:n2:n3:n4 is a list containing four values.
-#
-# Below is an example of a RocksDB options file:
-
-
-#----------------------IMPORTANT------------------------------------#
-### FAKE VALUES FOR TESTING ONLY ### DO NOT USE THESE FOR PRODUCTION.
-#----------------------IMPORTANT------------------------------------#
-[DBOptions]
-  stats_dump_period_sec=600
-  max_manifest_file_size=551615
-  bytes_per_sync=8388608
-  delayed_write_rate=2097152
-  WAL_ttl_seconds=0
-  WAL_size_limit_MB=0
-  max_subcompactions=1
-  wal_dir=
-  wal_bytes_per_sync=0
-  db_write_buffer_size=0
-  keep_log_file_num=1000
-  table_cache_numshardbits=4
-  max_file_opening_threads=1
-  writable_file_max_buffer_size=1048576
-  random_access_max_buffer_size=1048576
-  use_fsync=false
-  max_total_wal_size=0
-  max_open_files=-1
-  skip_stats_update_on_db_open=false
-  max_background_compactions=16
-  manifest_preallocation_size=4194304
-  max_background_flushes=7
-  is_fd_close_on_exec=true
-  max_log_file_size=0
-  advise_random_on_open=true
-  create_missing_column_families=false
-  paranoid_checks=true
-  delete_obsolete_files_period_micros=21600000000
-  log_file_time_to_roll=0
-  compaction_readahead_size=0
-  create_if_missing=false
-  use_adaptive_mutex=false
-  enable_thread_tracking=false
-  allow_fallocate=true
-  error_if_exists=false
-  recycle_log_file_num=0
-  skip_log_error_on_recovery=false
-  db_log_dir=
-  new_table_reader_for_compaction_inputs=true
-  allow_mmap_reads=false
-  allow_mmap_writes=false
-  use_direct_reads=false
-  use_direct_writes=false
-
-
-[CFOptions "default"]
-  compaction_style=kCompactionStyleLevel
-  compaction_filter=nullptr
-  num_levels=6
-  table_factory=BlockBasedTable
-  comparator=leveldb.BytewiseComparator
-  max_sequential_skip_in_iterations=8
-  soft_rate_limit=0.000000
-  max_bytes_for_level_base=1073741824
-  memtable_prefix_bloom_probes=6
-  memtable_prefix_bloom_bits=0
-  memtable_prefix_bloom_huge_page_tlb_size=0
-  max_successive_merges=0
-  arena_block_size=16777216
-  min_write_buffer_number_to_merge=1
-  target_file_size_multiplier=1
-  source_compaction_factor=1
-  max_bytes_for_level_multiplier=8
-  max_bytes_for_level_multiplier_additional=2:3:5
-  compaction_filter_factory=nullptr
-  max_write_buffer_number=8
-  level0_stop_writes_trigger=20
-  compression=kSnappyCompression
-  level0_file_num_compaction_trigger=4
-  purge_redundant_kvs_while_flush=true
-  max_write_buffer_number_to_maintain=0
-  memtable_factory=SkipListFactory
-  max_grandparent_overlap_factor=8
-  expanded_compaction_factor=25
-  hard_pending_compaction_bytes_limit=137438953472
-  inplace_update_num_locks=10000
-  level_compaction_dynamic_level_bytes=true
-  level0_slowdown_writes_trigger=12
-  filter_deletes=false
-  verify_checksums_in_compaction=true
-  min_partial_merge_operands=2
-  paranoid_file_checks=false
-  target_file_size_base=134217728
-  optimize_filters_for_hits=false
-  merge_operator=PutOperator
-  compression_per_level=kNoCompression:kNoCompression:kNoCompression:kSnappyCompression:kSnappyCompression:kSnappyCompression
-  compaction_measure_io_stats=false
-  prefix_extractor=nullptr
-  bloom_locality=0
-  write_buffer_size=134217728
-  disable_auto_compactions=false
-  inplace_update_support=false
-
-[TableOptions/BlockBasedTable "default"]
-  format_version=2
-  whole_key_filtering=true
-  no_block_cache=false
-  checksum=kCRC32c
-  filter_policy=rocksdb.BuiltinBloomFilter
-  block_size_deviation=10
-  block_size=8192
-  block_restart_interval=16
-  cache_index_and_filter_blocks=false
-  pin_l0_filter_and_index_blocks_in_cache=false
-  pin_top_level_index_and_filter=false
-  index_type=kBinarySearch
-  hash_index_allow_collision=true
-  flush_block_policy_factory=FlushBlockBySizePolicyFactory
\ No newline at end of file
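
test.db.ini follows the standard RocksDB options-file layout described in its own header comments. A sketch of loading such a file back into DBOptions and column-family descriptors, assuming the RocksDB Java OptionsUtil API (not part of this diff); the file path is illustrative.

import java.util.ArrayList;
import java.util.List;

import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.DBOptions;
import org.rocksdb.Env;
import org.rocksdb.OptionsUtil;
import org.rocksdb.RocksDB;

public class LoadDbIniSketch {
  public static void main(String[] args) throws Exception {
    RocksDB.loadLibrary();
    DBOptions dbOptions = new DBOptions();
    List<ColumnFamilyDescriptor> cfDescs = new ArrayList<>();
    // Populates dbOptions and cfDescs from the [DBOptions] and [CFOptions]
    // sections of the ini file.
    OptionsUtil.loadOptionsFromFile(
        "src/test/resources/test.db.ini", Env.getDefault(), dbOptions, cfDescs);
    System.out.println("Loaded " + cfDescs.size() + " column families");
  }
}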

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
deleted file mode 100644
index 3571a89..0000000
--- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<FindBugsFilter>
-  <Match>
-    <Package name="org.apache.hadoop.hdds.protocol.proto"/>
-  </Match>
-</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
deleted file mode 100644
index 54c5fad..0000000
--- a/hadoop-hdds/container-service/pom.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-hdds</artifactId>
-    <version>0.3.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-hdds-container-service</artifactId>
-  <version>0.3.0-SNAPSHOT</version>
-  <description>Apache Hadoop Distributed Data Store Container Service</description>
-  <name>Apache Hadoop HDDS Container Service</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdds-server-framework</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.2.0</version>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.yaml</groupId>
-      <artifactId>snakeyaml</artifactId>
-      <version>1.8</version>
-    </dependency>
-
-    <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-maven-plugins</artifactId>
-        <executions>
-          <execution>
-            <id>compile-protoc</id>
-            <goals>
-              <goal>protoc</goal>
-            </goals>
-            <configuration>
-              <protocVersion>${protobuf.version}</protocVersion>
-              <protocCommand>${protoc.path}</protocCommand>
-              <imports>
-                <param>
-                  ${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto
-                </param>
-                <param>
-                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/
-                </param>
-                <param>
-                  ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/
-                </param>
-                <param>
-                  ${basedir}/../../hadoop-hdds/common/src/main/proto/
-                </param>
-                <param>${basedir}/src/main/proto</param>
-              </imports>
-              <source>
-                <directory>${basedir}/src/main/proto</directory>
-                <includes>
-                  <include>StorageContainerDatanodeProtocol.proto</include>
-                </includes>
-              </source>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>findbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
deleted file mode 100644
index d505be3..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_HEARTBEAT_INTERVAL;
-import static org.apache.hadoop.hdds.HddsConfigKeys
-    .HDDS_HEARTBEAT_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdds.HddsUtils.*;
-import static org.apache.hadoop.hdds.server.ServerUtils.sanitizeUserArgs;
-
-/**
- * Hdds stateless helper functions for server side components.
- */
-public final class HddsServerUtil {
-
-  private HddsServerUtil() {
-  }
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsServerUtil.class);
-
-  /**
-   * Retrieve the socket address that should be used by DataNodes to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getScmAddressForDataNodes(
-      Configuration conf) {
-    // We try the following settings in decreasing priority to retrieve the
-    // target host.
-    // - OZONE_SCM_DATANODE_ADDRESS_KEY
-    // - OZONE_SCM_CLIENT_ADDRESS_KEY
-    //
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY +
-              " must be defined. See" +
-              " https://wiki.apache.org/hadoop/Ozone#Configuration "
-              + "for details on configuring Ozone.");
-    }
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY);
-
-    InetSocketAddress addr = NetUtils.createSocketAddr(host.get() + ":" +
-        port.or(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-
-    return addr;
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM client endpoint.
-   */
-  public static InetSocketAddress getScmClientBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY);
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT) + ":" +
-            port.or(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to the SCM Block service.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM block client endpoint.
-   */
-  public static InetSocketAddress getScmBlockClientBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY);
-
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT) +
-            ":" + port.or(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by DataNodes to connect
-   * to the SCM.
-   *
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getScmDataNodeBindAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY);
-
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_DEFAULT) + ":" +
-            port.or(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT));
-  }
-
-
-  /**
-   * Returns the interval in which the heartbeat processor thread runs.
-   *
-   * @param conf - Configuration
-   * @return long in Milliseconds.
-   */
-  public static long getScmheartbeatCheckerInterval(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
-        ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Heartbeat Interval - Defines the heartbeat frequency from a datanode to
-   * SCM.
-   *
-   * @param conf - Ozone Config
-   * @return - HB interval in milliseconds.
-   */
-  public static long getScmHeartbeatInterval(Configuration conf) {
-    return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
-        HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Get the Stale Node interval, which is used by SCM to flag a datanode as
-   * stale, if the heartbeat from that node has been missing for this duration.
-   *
-   * @param conf - Configuration.
-   * @return - Long, Milliseconds to wait before flagging a node as stale.
-   */
-  public static long getStaleNodeInterval(Configuration conf) {
-
-    long staleNodeIntervalMs =
-        conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
-            OZONE_SCM_STALENODE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-
-    long heartbeatThreadFrequencyMs = getScmheartbeatCheckerInterval(conf);
-
-    long heartbeatIntervalMs = getScmHeartbeatInterval(conf);
-
-
-    // Make sure that StaleNodeInterval is configured way above the frequency
-    // at which we run the heartbeat thread.
-    //
-    // Here we check that staleNodeInterval is at least five times more than the
-    // frequency at which the accounting thread is going to run.
-    try {
-      sanitizeUserArgs(staleNodeIntervalMs, heartbeatThreadFrequencyMs,
-          5, 1000);
-    } catch (IllegalArgumentException ex) {
-      LOG.error("Stale Node Interval is cannot be honored due to " +
-              "mis-configured {}. ex:  {}",
-          OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, ex);
-      throw ex;
-    }
-
-    // Make sure that stale node value is greater than configured value that
-    // datanodes are going to send HBs.
-    try {
-      sanitizeUserArgs(staleNodeIntervalMs, heartbeatIntervalMs, 3, 1000);
-    } catch (IllegalArgumentException ex) {
-      LOG.error("Stale Node Interval MS is cannot be honored due to " +
-          "mis-configured {}. ex:  {}", HDDS_HEARTBEAT_INTERVAL, ex);
-      throw ex;
-    }
-    return staleNodeIntervalMs;
-  }
-
-  /**
-   * Gets the interval for dead node flagging. This has to be a value that is
-   * greater than stale node value,  and by transitive relation we also know
-   * that this value is greater than heartbeat interval and heartbeatProcess
-   * Interval.
-   *
-   * @param conf - Configuration.
-   * @return - the interval for dead node flagging.
-   */
-  public static long getDeadNodeInterval(Configuration conf) {
-    long staleNodeIntervalMs = getStaleNodeInterval(conf);
-    long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL,
-        OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-
-    try {
-      // Make sure that dead nodes Ms is at least twice the time for staleNodes
-      // with a max of 1000 times the staleNodes.
-      sanitizeUserArgs(deadNodeIntervalMs, staleNodeIntervalMs, 2, 1000);
-    } catch (IllegalArgumentException ex) {
-      LOG.error("Dead Node Interval MS is cannot be honored due to " +
-          "mis-configured {}. ex:  {}", OZONE_SCM_STALENODE_INTERVAL, ex);
-      throw ex;
-    }
-    return deadNodeIntervalMs;
-  }
-
-  /**
-   * Timeout value for the RPC from Datanode to SCM, primarily used for
-   * Heartbeats and container reports.
-   *
-   * @param conf - Ozone Config
-   * @return - Rpc timeout in Milliseconds.
-   */
-  public static long getScmRpcTimeOutInMilliseconds(Configuration conf) {
-    return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT,
-        OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Log Warn interval.
-   *
-   * @param conf - Ozone Config
-   * @return - Log warn interval.
-   */
-  public static int getLogWarnInterval(Configuration conf) {
-    return conf.getInt(OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT,
-        OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT);
-  }
-
-  /**
-   * returns the Container port.
-   * @param conf - Conf
-   * @return port number.
-   */
-  public static int getContainerPort(Configuration conf) {
-    return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-  }
-
-
-  /**
-   * Return the list of service addresses for the Ozone SCM. This method is used
-   * by the DataNodes to determine the service instances to connect to.
-   *
-   * @param conf
-   * @return list of SCM service addresses.
-   */
-  public static Map<String, ? extends Map<String, InetSocketAddress>>
-      getScmServiceRpcAddresses(Configuration conf) {
-
-    final Map<String, InetSocketAddress> serviceInstances = new HashMap<>();
-    serviceInstances.put(OZONE_SCM_SERVICE_INSTANCE_ID,
-        getScmAddressForDataNodes(conf));
-
-    final Map<String, Map<String, InetSocketAddress>> services =
-        new HashMap<>();
-    services.put(OZONE_SCM_SERVICE_ID, serviceInstances);
-    return services;
-  }
-
-  public static String getOzoneDatanodeRatisDirectory(Configuration conf) {
-    final String ratisDir = File.separator + "ratis";
-    String storageDir = conf.get(
-            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
-
-    if (Strings.isNullOrEmpty(storageDir)) {
-      storageDir = conf.get(OzoneConfigKeys
-              .OZONE_METADATA_DIRS);
-      Preconditions.checkNotNull(storageDir, "ozone.metadata.dirs " +
-              "cannot be null, Please check your configs.");
-      storageDir = storageDir.concat(ratisDir);
-      LOG.warn("Storage directory for Ratis is not configured." +
-               "Mapping Ratis storage under {}. It is a good idea " +
-               "to map this to an SSD disk.", storageDir);
-    }
-    return storageDir;
-  }
-}
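
The interval getters above enforce an ordering between the configured durations: the stale-node interval must be at least five times the heartbeat-processor interval and at least three times the heartbeat interval, and the dead-node interval at least twice the stale-node interval. Below is a sketch of a configuration that satisfies those checks, using the HddsConfigKeys/ScmConfigKeys constants imported above; the concrete values are illustrative.

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.HddsServerUtil;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class IntervalConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Illustrative values that respect the ratios checked above:
    // stale >= 5 x heartbeat-process, stale >= 3 x heartbeat, dead >= 2 x stale.
    conf.set(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, "30s");
    conf.set(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, "3s");
    conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "90s");
    conf.set(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, "10m");

    System.out.println(HddsServerUtil.getScmHeartbeatInterval(conf));        // 30000
    System.out.println(HddsServerUtil.getScmheartbeatCheckerInterval(conf)); // 3000
    System.out.println(HddsServerUtil.getStaleNodeInterval(conf));           // 90000
    System.out.println(HddsServerUtil.getDeadNodeInterval(conf));            // 600000
  }
}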

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java
deleted file mode 100644
index 4e52046..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-/**
- * This is a class that tracks versions of SCM.
- */
-public final class VersionInfo {
-
-  // We will just be normal and use positive counting numbers for versions.
-  private final static VersionInfo[] VERSION_INFOS =
-      {new VersionInfo("First version of SCM", 1)};
-
-
-  public static final String DESCRIPTION_KEY = "Description";
-  private final String description;
-  private final int version;
-
-  /**
-   * Never created outside this class.
-   *
-   * @param description -- description
-   * @param version     -- version number
-   */
-  private VersionInfo(String description, int version) {
-    this.description = description;
-    this.version = version;
-  }
-
-  /**
-   * Returns all versions.
-   *
-   * @return Version info array.
-   */
-  public static VersionInfo[] getAllVersions() {
-    return VERSION_INFOS.clone();
-  }
-
-  /**
-   * Returns the latest version.
-   *
-   * @return versionInfo
-   */
-  public static VersionInfo getLatestVersion() {
-    return VERSION_INFOS[VERSION_INFOS.length - 1];
-  }
-
-  /**
-   * Return description.
-   *
-   * @return String
-   */
-  public String getDescription() {
-    return description;
-  }
-
-  /**
-   * Return the version.
-   *
-   * @return int.
-   */
-  public int getVersion() {
-    return version;
-  }
-
-}
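
Usage of VersionInfo is a one-liner; a short sketch, assuming the class exactly as defined above.

import org.apache.hadoop.hdds.scm.VersionInfo;

public class VersionInfoSketch {
  public static void main(String[] args) {
    VersionInfo latest = VersionInfo.getLatestVersion();
    // Prints 1 and "First version of SCM" for the array defined above.
    System.out.println(latest.getVersion());
    System.out.println(latest.getDescription());
  }
}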

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index 5905468..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
deleted file mode 100644
index 348196c..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.util.ServicePlugin;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-/**
- * Datanode service plugin to start the HDDS container services.
- */
-public class HddsDatanodeService implements ServicePlugin {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      HddsDatanodeService.class);
-
-
-  private OzoneConfiguration conf;
-  private DatanodeDetails datanodeDetails;
-  private DatanodeStateMachine datanodeStateMachine;
-  private List<ServicePlugin> plugins;
-
-  /**
-   * Default constructor.
-   */
-  public HddsDatanodeService() {
-    this(null);
-  }
-
-  /**
-   * Constructs {@link HddsDatanodeService} using the provided {@code conf}
-   * value.
-   *
-   * @param conf OzoneConfiguration
-   */
-  public HddsDatanodeService(Configuration conf) {
-    if (conf == null) {
-      this.conf = new OzoneConfiguration();
-    } else {
-      this.conf = new OzoneConfiguration(conf);
-    }
-  }
-
-  /**
-   * Starts HddsDatanode services.
-   *
-   * @param service The service instance invoking this method
-   */
-  @Override
-  public void start(Object service) {
-    OzoneConfiguration.activate();
-    if (service instanceof Configurable) {
-      conf = new OzoneConfiguration(((Configurable) service).getConf());
-    }
-    if (HddsUtils.isHddsEnabled(conf)) {
-      try {
-        String hostname = HddsUtils.getHostName(conf);
-        String ip = InetAddress.getByName(hostname).getHostAddress();
-        datanodeDetails = initializeDatanodeDetails();
-        datanodeDetails.setHostName(hostname);
-        datanodeDetails.setIpAddress(ip);
-        datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf);
-        startPlugins();
-        // Starting HDDS Daemons
-        datanodeStateMachine.startDaemon();
-      } catch (IOException e) {
-        throw new RuntimeException("Can't start the HDDS datanode plugin", e);
-      }
-    }
-  }
-
-  /**
-   * Returns DatanodeDetails or null in case of Error.
-   *
-   * @return DatanodeDetails
-   */
-  private DatanodeDetails initializeDatanodeDetails()
-      throws IOException {
-    String idFilePath = HddsUtils.getDatanodeIdFilePath(conf);
-    if (idFilePath == null || idFilePath.isEmpty()) {
-      LOG.error("A valid file path is needed for config setting {}",
-          ScmConfigKeys.OZONE_SCM_DATANODE_ID);
-      throw new IllegalArgumentException(ScmConfigKeys.OZONE_SCM_DATANODE_ID +
-          " must be defined. See" +
-          " https://wiki.apache.org/hadoop/Ozone#Configuration"; +
-          " for details on configuring Ozone.");
-    }
-
-    Preconditions.checkNotNull(idFilePath);
-    File idFile = new File(idFilePath);
-    if (idFile.exists()) {
-      return ContainerUtils.readDatanodeDetailsFrom(idFile);
-    } else {
-      // There is no datanode.id file, this might be the first time datanode
-      // is started.
-      String datanodeUuid = UUID.randomUUID().toString();
-      return DatanodeDetails.newBuilder().setUuid(datanodeUuid).build();
-    }
-  }
-
-  /**
-   * Starts all the service plugins which are configured using
-   * OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY.
-   */
-  private void startPlugins() {
-    try {
-      plugins = conf.getInstances(HDDS_DATANODE_PLUGINS_KEY,
-          ServicePlugin.class);
-    } catch (RuntimeException e) {
-      String pluginsValue = conf.get(HDDS_DATANODE_PLUGINS_KEY);
-      LOG.error("Unable to load HDDS DataNode plugins. " +
-          "Specified list of plugins: {}",
-          pluginsValue, e);
-      throw e;
-    }
-    for (ServicePlugin plugin : plugins) {
-      try {
-        plugin.start(this);
-        LOG.info("Started plug-in {}", plugin);
-      } catch (Throwable t) {
-        LOG.warn("ServicePlugin {} could not be started", plugin, t);
-      }
-    }
-  }
-
-  /**
-   * Returns the OzoneConfiguration used by this HddsDatanodeService.
-   *
-   * @return OzoneConfiguration
-   */
-  public OzoneConfiguration getConf() {
-    return conf;
-  }
-  /**
-   *
-   * Return DatanodeDetails if set, return null otherwise.
-   *
-   * @return DatanodeDetails
-   */
-  @VisibleForTesting
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-  @VisibleForTesting
-  public DatanodeStateMachine getDatanodeStateMachine() {
-    return datanodeStateMachine;
-  }
-
-  public void join() {
-    try {
-      datanodeStateMachine.join();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.info("Interrupted during StorageContainerManager join.");
-    }
-  }
-
-  @Override
-  public void stop() {
-    if (plugins != null) {
-      for (ServicePlugin plugin : plugins) {
-        try {
-          plugin.stop();
-          LOG.info("Stopped plug-in {}", plugin);
-        } catch (Throwable t) {
-          LOG.warn("ServicePlugin {} could not be stopped", plugin, t);
-        }
-      }
-    }
-    if (datanodeStateMachine != null) {
-      datanodeStateMachine.stopDaemon();
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (plugins != null) {
-      for (ServicePlugin plugin : plugins) {
-        try {
-          plugin.close();
-        } catch (Throwable t) {
-          LOG.warn("ServicePlugin {} could not be closed", plugin, t);
-        }
-      }
-    }
-  }
-
-  public static HddsDatanodeService createHddsDatanodeService(
-      Configuration conf) {
-    return new HddsDatanodeService(conf);
-  }
-
-  public static void main(String[] args) {
-    try {
-      if (DFSUtil.parseHelpArgument(
-          args, "Starts HDDS Datanode", System.out, false)) {
-        System.exit(0);
-      }
-      Configuration conf = new OzoneConfiguration();
-      GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
-      if (!hParser.isParseSuccessful()) {
-        GenericOptionsParser.printGenericCommandUsage(System.err);
-        System.exit(1);
-      }
-      StringUtils.startupShutdownMessage(HddsDatanodeService.class, args, LOG);
-      DefaultMetricsSystem.initialize("HddsDatanode");
-      HddsDatanodeService hddsDatanodeService =
-          createHddsDatanodeService(conf);
-      hddsDatanodeService.start(null);
-      hddsDatanodeService.join();
-    } catch (Throwable e) {
-      LOG.error("Exception in HddsDatanodeService.", e);
-      terminate(1, e);
-    }
-  }
-}
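
HddsDatanodeService is itself started as a DataNode ServicePlugin, and in turn loads further plugins named under HDDS_DATANODE_PLUGINS_KEY. A sketch of a minimal plugin that could be listed there, assuming only the org.apache.hadoop.util.ServicePlugin interface used above; the class name is hypothetical.

import java.io.IOException;

import org.apache.hadoop.util.ServicePlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DemoDatanodePlugin implements ServicePlugin {
  private static final Logger LOG =
      LoggerFactory.getLogger(DemoDatanodePlugin.class);

  @Override
  public void start(Object service) {
    // "service" is the HddsDatanodeService instance calling startPlugins().
    LOG.info("Demo plugin started by {}", service.getClass().getSimpleName());
  }

  @Override
  public void stop() {
    LOG.info("Demo plugin stopped");
  }

  @Override
  public void close() throws IOException {
    // Release any resources held by the plugin.
  }
}

Plugins loaded this way need a public no-argument constructor, since they are instantiated via conf.getInstances() as shown in startPlugins() above.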

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
deleted file mode 100644
index 2d58c39..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/DataNodeLayoutVersion.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common;
-
-/**
- * Datanode layout version which describes information about the layout version
- * on the datanode.
- */
-public final class DataNodeLayoutVersion {
-
-  // We will just be normal and use positive counting numbers for versions.
-  private final static DataNodeLayoutVersion[] VERSION_INFOS =
-      {new DataNodeLayoutVersion(1, "HDDS Datanode LayOut Version 1")};
-
-  private final String description;
-  private final int version;
-
-  /**
-   * Never created outside this class.
-   *
-   * @param description -- description
-   * @param version     -- version number
-   */
-  private DataNodeLayoutVersion(int version, String description) {
-    this.description = description;
-    this.version = version;
-  }
-
-  /**
-   * Returns all versions.
-   *
-   * @return Version info array.
-   */
-  public static DataNodeLayoutVersion[] getAllVersions() {
-    return VERSION_INFOS.clone();
-  }
-
-  /**
-   * Returns the latest version.
-   *
-   * @return versionInfo
-   */
-  public static DataNodeLayoutVersion getLatestVersion() {
-    return VERSION_INFOS[VERSION_INFOS.length - 1];
-  }
-
-  /**
-   * Return description.
-   *
-   * @return String
-   */
-  public String getDescription() {
-    return description;
-  }
-
-  /**
-   * Return the version.
-   *
-   * @return int.
-   */
-  public int getVersion() {
-    return version;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
deleted file mode 100644
index 2879001..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableQuantiles;
-import org.apache.hadoop.metrics2.lib.MutableRate;
-
-/**
- *
- * This class is for maintaining  the various Storage Container
- * DataNode statistics and publishing them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- * <p>
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- *  for example:
- *  <p> {@link #numOps}.inc()
- *
- */
-@InterfaceAudience.Private
-@Metrics(about="Storage Container DataNode Metrics", context="dfs")
-public class ContainerMetrics {
-  @Metric private MutableCounterLong numOps;
-  private MutableCounterLong[] numOpsArray;
-  private MutableCounterLong[] opsBytesArray;
-  private MutableRate[] opsLatency;
-  private MutableQuantiles[][] opsLatQuantiles;
-  private MetricsRegistry registry = null;
-
-  public ContainerMetrics(int[] intervals) {
-    int numEnumEntries = ContainerProtos.Type.values().length;
-    final int len = intervals.length;
-    this.numOpsArray = new MutableCounterLong[numEnumEntries];
-    this.opsBytesArray = new MutableCounterLong[numEnumEntries];
-    this.opsLatency = new MutableRate[numEnumEntries];
-    this.opsLatQuantiles = new MutableQuantiles[numEnumEntries][len];
-    this.registry = new MetricsRegistry("StorageContainerMetrics");
-    for (int i = 0; i < numEnumEntries; i++) {
-      numOpsArray[i] = registry.newCounter(
-          "num" + ContainerProtos.Type.forNumber(i + 1),
-          "number of " + ContainerProtos.Type.forNumber(i + 1) + " ops",
-          (long) 0);
-      opsBytesArray[i] = registry.newCounter(
-          "bytes" + ContainerProtos.Type.forNumber(i + 1),
-          "bytes used by " + ContainerProtos.Type.forNumber(i + 1) + "op",
-          (long) 0);
-      opsLatency[i] = registry.newRate(
-          "latency" + ContainerProtos.Type.forNumber(i + 1),
-          ContainerProtos.Type.forNumber(i + 1) + " op");
-
-      for (int j = 0; j < len; j++) {
-        int interval = intervals[j];
-        String quantileName = ContainerProtos.Type.forNumber(i + 1) + "Nanos"
-            + interval + "s";
-        opsLatQuantiles[i][j] = registry.newQuantiles(quantileName,
-            "latency of Container ops", "ops", "latency", interval);
-      }
-    }
-  }
-
-  public static ContainerMetrics create(Configuration conf) {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    // Percentile measurement is off by default; it is only enabled when
-    // intervals are configured.
-    int[] intervals =
-             conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
-    return ms.register("StorageContainerMetrics",
-                       "Storage Container Node Metrics",
-                       new ContainerMetrics(intervals));
-  }
-
-  public void incContainerOpsMetrics(ContainerProtos.Type type) {
-    numOps.incr();
-    numOpsArray[type.ordinal()].incr();
-  }
-
-  public long getContainerOpsMetrics(ContainerProtos.Type type){
-    return numOpsArray[type.ordinal()].value();
-  }
-
-  public void incContainerOpsLatencies(ContainerProtos.Type type,
-                                       long latencyNanos) {
-    opsLatency[type.ordinal()].add(latencyNanos);
-    for (MutableQuantiles q: opsLatQuantiles[type.ordinal()]) {
-      q.add(latencyNanos);
-    }
-  }
-
-  public void incContainerBytesStats(ContainerProtos.Type type, long bytes) {
-    opsBytesArray[type.ordinal()].incr(bytes);
-  }
-
-  public long getContainerBytesMetrics(ContainerProtos.Type type){
-    return opsBytesArray[type.ordinal()].value();
-  }
-}
\ No newline at end of file
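
For context, a minimal sketch of how the removed ContainerMetrics class was driven, based only on the methods visible in the diff above; the WriteChunk constant is assumed to be one of the ContainerProtos.Type values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;

    public class ContainerMetricsSketch {
      public static void main(String[] args) {
        // Registers a "StorageContainerMetrics" source with the default metrics system.
        ContainerMetrics metrics = ContainerMetrics.create(new Configuration());

        long start = System.nanoTime();
        // ... perform a container operation, e.g. write a 4 KB chunk ...
        ContainerProtos.Type op = ContainerProtos.Type.WriteChunk; // assumed enum constant
        metrics.incContainerOpsMetrics(op);                        // bumps numOps and the per-op counter
        metrics.incContainerBytesStats(op, 4096L);                 // bytes moved by this op
        metrics.incContainerOpsLatencies(op, System.nanoTime() - start);
      }
    }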

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
deleted file mode 100644
index a4c1f2f..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
-
-
-/**
- * Container Report iterates the closed containers and sends a container report
- * to SCM.
- */
-public class ContainerReport {
-  private static final int UNKNOWN = -1;
-  private final String finalhash;
-  private long size;
-  private long keyCount;
-  private long bytesUsed;
-  private long readCount;
-  private long writeCount;
-  private long readBytes;
-  private long writeBytes;
-  private long containerID;
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public void setContainerID(long containerID) {
-    this.containerID = containerID;
-  }
-
-  /**
-   * Constructs the ContainerReport.
-   *
-   * @param containerID - Container ID.
-   * @param finalhash - Final Hash.
-   */
-  public ContainerReport(long containerID, String finalhash) {
-    this.containerID = containerID;
-    this.finalhash = finalhash;
-    this.size = UNKNOWN;
-    this.keyCount = UNKNOWN;
-    this.bytesUsed = 0L;
-    this.readCount = 0L;
-    this.readBytes = 0L;
-    this.writeCount = 0L;
-    this.writeBytes = 0L;
-  }
-
-  /**
-   * Gets a containerReport from protobuf class.
-   *
-   * @param info - ContainerInfo.
-   * @return - ContainerReport.
-   */
-  public static ContainerReport getFromProtoBuf(ContainerInfo info) {
-    Preconditions.checkNotNull(info);
-    ContainerReport report = new ContainerReport(info.getContainerID(),
-        info.getFinalhash());
-    if (info.hasSize()) {
-      report.setSize(info.getSize());
-    }
-    if (info.hasKeyCount()) {
-      report.setKeyCount(info.getKeyCount());
-    }
-    if (info.hasUsed()) {
-      report.setBytesUsed(info.getUsed());
-    }
-    if (info.hasReadCount()) {
-      report.setReadCount(info.getReadCount());
-    }
-    if (info.hasReadBytes()) {
-      report.setReadBytes(info.getReadBytes());
-    }
-    if (info.hasWriteCount()) {
-      report.setWriteCount(info.getWriteCount());
-    }
-    if (info.hasWriteBytes()) {
-      report.setWriteBytes(info.getWriteBytes());
-    }
-
-    report.setContainerID(info.getContainerID());
-    return report;
-  }
-
-  /**
-   * Returns the final signature for this container.
-   *
-   * @return - hash
-   */
-  public String getFinalhash() {
-    return finalhash;
-  }
-
-  /**
-   * Returns a positive number if it is a valid number, -1 if not known.
-   *
-   * @return size or -1
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Sets the size of the container on disk.
-   *
-   * @param size - int
-   */
-  public void setSize(long size) {
-    this.size = size;
-  }
-
-  /**
-   * Gets number of keys in the container if known.
-   *
-   * @return - Number of keys or -1 for not known.
-   */
-  public long getKeyCount() {
-    return keyCount;
-  }
-
-  /**
-   * Sets the key count.
-   *
-   * @param keyCount - Key Count
-   */
-  public void setKeyCount(long keyCount) {
-    this.keyCount = keyCount;
-  }
-
-  public long getReadCount() {
-    return readCount;
-  }
-
-  public void setReadCount(long readCount) {
-    this.readCount = readCount;
-  }
-
-  public long getWriteCount() {
-    return writeCount;
-  }
-
-  public void setWriteCount(long writeCount) {
-    this.writeCount = writeCount;
-  }
-
-  public long getReadBytes() {
-    return readBytes;
-  }
-
-  public void setReadBytes(long readBytes) {
-    this.readBytes = readBytes;
-  }
-
-  public long getWriteBytes() {
-    return writeBytes;
-  }
-
-  public void setWriteBytes(long writeBytes) {
-    this.writeBytes = writeBytes;
-  }
-
-  public long getBytesUsed() {
-    return bytesUsed;
-  }
-
-  public void setBytesUsed(long bytesUsed) {
-    this.bytesUsed = bytesUsed;
-  }
-
-  /**
-   * Gets a containerInfo protobuf message from ContainerReports.
-   *
-   * @return ContainerInfo
-   */
-  public ContainerInfo getProtoBufMessage() {
-    return ContainerInfo.newBuilder()
-        .setKeyCount(this.getKeyCount())
-        .setSize(this.getSize())
-        .setUsed(this.getBytesUsed())
-        .setReadCount(this.getReadCount())
-        .setReadBytes(this.getReadBytes())
-        .setWriteCount(this.getWriteCount())
-        .setWriteBytes(this.getWriteBytes())
-        .setFinalhash(this.getFinalhash())
-        .setContainerID(this.getContainerID())
-        .build();
-  }
-}
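
For context, a sketch of the round trip the removed ContainerReport supported, using only methods shown in the diff above; the ID, hash and size values are illustrative:

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;

    public class ContainerReportSketch {
      public static void main(String[] args) {
        ContainerReport report = new ContainerReport(42L, "sha256-of-container"); // illustrative values
        report.setSize(5L * 1024 * 1024 * 1024);
        report.setKeyCount(1000L);
        report.setBytesUsed(3L * 1024 * 1024 * 1024);

        // Serialize to the protobuf form sent to SCM, then rebuild the report from it.
        ContainerInfo info = report.getProtoBufMessage();
        ContainerReport rebuilt = ContainerReport.getFromProtoBuf(info);
        System.out.println(rebuilt.getContainerID() + " keys=" + rebuilt.getKeyCount());
      }
    }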

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
deleted file mode 100644
index d96849e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.fs.FileAlreadyExistsException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.nio.file.Paths;
-import org.yaml.snakeyaml.Yaml;
-
-import static org.apache.commons.io.FilenameUtils.removeExtension;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_CHECKSUM_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.NO_SUCH_ALGORITHM;
-import static org.apache.hadoop.ozone.container.common.impl.ContainerData
-    .CHARSET_ENCODING;
-
-/**
- * A set of helper functions to create proper responses.
- */
-public final class ContainerUtils {
-
-  private ContainerUtils() {
-    //never constructed.
-  }
-
-  /**
-   * Returns a Container Command Response Builder with the specified result
-   * and message.
-   * @param request requestProto message.
-   * @param result result of the command.
-   * @param message response message.
-   * @return ContainerCommand Response Builder.
-   */
-  public static ContainerCommandResponseProto.Builder
-      getContainerCommandResponse(
-          ContainerCommandRequestProto request, Result result, String message) {
-    return ContainerCommandResponseProto.newBuilder()
-        .setCmdType(request.getCmdType())
-        .setTraceID(request.getTraceID())
-        .setResult(result)
-        .setMessage(message);
-  }
-
-  /**
-   * Returns a Container Command Response Builder. This call is used to build
-   * success responses. Calling function can add other fields to the response
-   * as required.
-   * @param request requestProto message.
-   * @return ContainerCommand Response Builder with result as SUCCESS.
-   */
-  public static ContainerCommandResponseProto.Builder getSuccessResponseBuilder(
-      ContainerCommandRequestProto request) {
-    return
-        ContainerCommandResponseProto.newBuilder()
-            .setCmdType(request.getCmdType())
-            .setTraceID(request.getTraceID())
-            .setResult(Result.SUCCESS);
-  }
-
-  /**
-   * Returns a Container Command Response. This call is used for creating null
-   * success responses.
-   * @param request requestProto message.
-   * @return ContainerCommand Response with result as SUCCESS.
-   */
-  public static ContainerCommandResponseProto getSuccessResponse(
-      ContainerCommandRequestProto request) {
-    ContainerCommandResponseProto.Builder builder =
-        getContainerCommandResponse(request, Result.SUCCESS, "");
-    return builder.build();
-  }
-
-  /**
-   * We found a command type but no associated payload for the command. Hence
-   * return malformed Command as response.
-   *
-   * @param request - Protobuf message.
-   * @return ContainerCommandResponseProto - MALFORMED_REQUEST.
-   */
-  public static ContainerCommandResponseProto malformedRequest(
-      ContainerCommandRequestProto request) {
-    return getContainerCommandResponse(request, Result.MALFORMED_REQUEST,
-        "Cmd type does not match the payload.").build();
-  }
-
-  /**
-   * We found a command type that is not supported yet.
-   *
-   * @param request - Protobuf message.
-   * @return ContainerCommandResponseProto - UNSUPPORTED_REQUEST.
-   */
-  public static ContainerCommandResponseProto unsupportedRequest(
-      ContainerCommandRequestProto request) {
-    return getContainerCommandResponse(request, Result.UNSUPPORTED_REQUEST,
-        "Server does not support this command yet.").build();
-  }
-
-  /**
-   * Logs the error and returns a response to the caller.
-   *
-   * @param log - Logger
-   * @param ex - Exception
-   * @param request - Request Object
-   * @return Response
-   */
-  public static ContainerCommandResponseProto logAndReturnError(
-      Logger log, StorageContainerException ex,
-      ContainerCommandRequestProto request) {
-    log.info("Operation: {} : Trace ID: {} : Message: {} : Result: {}",
-        request.getCmdType().name(), request.getTraceID(),
-        ex.getMessage(), ex.getResult().getValueDescriptor().getName());
-    return getContainerCommandResponse(request, ex.getResult(), ex.getMessage())
-        .build();
-  }
-
-  /**
-   * get containerName from a container file.
-   *
-   * @param containerFile - File
-   * @return Name of the container.
-   */
-  public static String getContainerNameFromFile(File containerFile) {
-    Preconditions.checkNotNull(containerFile);
-    return Paths.get(containerFile.getParent()).resolve(
-        removeExtension(containerFile.getName())).toString();
-  }
-
-  public static long getContainerIDFromFile(File containerFile) {
-    Preconditions.checkNotNull(containerFile);
-    String containerID = getContainerNameFromFile(containerFile);
-    return Long.parseLong(containerID);
-  }
-
-  /**
-   * Verifies that this is indeed a new container.
-   *
-   * @param containerFile - Container File to verify
-   * @throws IOException
-   */
-  public static void verifyIsNewContainer(File containerFile) throws
-      FileAlreadyExistsException {
-    Logger log = LoggerFactory.getLogger(ContainerSet.class);
-    Preconditions.checkNotNull(containerFile, "containerFile Should not be " +
-        "null");
-    if (containerFile.getParentFile().exists()) {
-      log.error("Container already exists on disk. File: {}", containerFile
-          .toPath());
-      throw new FileAlreadyExistsException("container already exists on " +
-          "disk.");
-    }
-  }
-
-  public static String getContainerDbFileName(String containerName) {
-    return containerName + OzoneConsts.DN_CONTAINER_DB;
-  }
-
-  /**
-   * Persists a {@link DatanodeDetails} to a local file.
-   *
-   * @throws IOException when read/write error occurs
-   */
-  public synchronized static void writeDatanodeDetailsTo(
-      DatanodeDetails datanodeDetails, File path) throws IOException {
-    if (path.exists()) {
-      if (!path.delete() || !path.createNewFile()) {
-        throw new IOException("Unable to overwrite the datanode ID file.");
-      }
-    } else {
-      if(!path.getParentFile().exists() &&
-          !path.getParentFile().mkdirs()) {
-        throw new IOException("Unable to create datanode ID directories.");
-      }
-    }
-    try (FileOutputStream out = new FileOutputStream(path)) {
-      HddsProtos.DatanodeDetailsProto proto =
-          datanodeDetails.getProtoBufMessage();
-      proto.writeTo(out);
-    }
-  }
-
-  /**
-   * Read {@link DatanodeDetails} from a local ID file.
-   *
-   * @param path ID file local path
-   * @return {@link DatanodeDetails}
-   * @throws IOException If the id file is malformed or other I/O exceptions
-   */
-  public synchronized static DatanodeDetails readDatanodeDetailsFrom(File path)
-      throws IOException {
-    if (!path.exists()) {
-      throw new IOException("Datanode ID file not found.");
-    }
-    try(FileInputStream in = new FileInputStream(path)) {
-      return DatanodeDetails.getFromProtoBuf(
-          HddsProtos.DatanodeDetailsProto.parseFrom(in));
-    } catch (IOException e) {
-      throw new IOException("Failed to parse DatanodeDetails from "
-          + path.getAbsolutePath(), e);
-    }
-  }
-
-  /**
-   * Verify that the checksum stored in containerData is equal to the
-   * computed checksum.
-   * @param containerData
-   * @throws IOException
-   */
-  public static void verifyChecksum(ContainerData containerData)
-      throws IOException {
-    String storedChecksum = containerData.getChecksum();
-
-    Yaml yaml = ContainerDataYaml.getYamlForContainerType(
-        containerData.getContainerType());
-    containerData.computeAndSetChecksum(yaml);
-    String computedChecksum = containerData.getChecksum();
-
-    if (storedChecksum == null || !storedChecksum.equals(computedChecksum)) {
-      throw new StorageContainerException("Container checksum error for " +
-          "ContainerID: " + containerData.getContainerID() + ". " +
-          "\nStored Checksum: " + storedChecksum +
-          "\nExpected Checksum: " + computedChecksum,
-          CONTAINER_CHECKSUM_ERROR);
-    }
-  }
-
-  /**
-   * Returns the SHA-256 checksum of the containerData.
-   * @param containerDataYamlStr ContainerData as a Yaml String
-   * @return Checksum of the container data
-   * @throws StorageContainerException
-   */
-  public static String getChecksum(String containerDataYamlStr)
-      throws StorageContainerException {
-    MessageDigest sha;
-    try {
-      sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-      sha.update(containerDataYamlStr.getBytes(CHARSET_ENCODING));
-      return DigestUtils.sha256Hex(sha.digest());
-    } catch (NoSuchAlgorithmException e) {
-      throw new StorageContainerException("Unable to create Message Digest, " +
-          "usually this is a java configuration issue.", NO_SUCH_ALGORITHM);
-    }
-  }
-
-  /**
-   * Get the .container file from the containerBaseDir.
-   * @param containerBaseDir container base directory. The name of this
-   *                         directory is same as the containerID
-   * @return the .container file
-   */
-  public static File getContainerFile(File containerBaseDir) {
-    // Container file layout is
-    // .../<<containerID>>/metadata/<<containerID>>.container
-    String containerFilePath = OzoneConsts.CONTAINER_META_PATH + File.separator
-        + getContainerID(containerBaseDir) + OzoneConsts.CONTAINER_EXTENSION;
-    return new File(containerBaseDir, containerFilePath);
-  }
-
-  /**
-   * ContainerID can be decoded from the container base directory name.
-   */
-  public static long getContainerID(File containerBaseDir) {
-    return Long.parseLong(containerBaseDir.getName());
-  }
-}
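
For context, a small sketch of the path helpers removed above; the directory layout (base directory named by the containerID, .container file under metadata/) is taken from the comments in the diff, and the /data path is only illustrative:

    import java.io.File;
    import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;

    public class ContainerUtilsSketch {
      public static void main(String[] args) {
        // The container base directory is named after the numeric containerID.
        File containerBaseDir = new File("/data/hdds/containers/12345"); // illustrative path
        long containerID = ContainerUtils.getContainerID(containerBaseDir); // 12345
        // Resolves .../12345/metadata/12345.container
        File containerFile = ContainerUtils.getContainerFile(containerBaseDir);
        System.out.println(containerID + " -> " + containerFile.getPath());
      }
    }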

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
deleted file mode 100644
index 4db6d31..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.util.Properties;
-
-/**
- * This is a utility class which helps to create the version file on datanode
- * and also validate the content of the version file.
- */
-public class DatanodeVersionFile {
-
-  private final String storageId;
-  private final String clusterId;
-  private final String datanodeUuid;
-  private final long cTime;
-  private final int layOutVersion;
-
-  public DatanodeVersionFile(String storageId, String clusterId,
-      String datanodeUuid, long cTime, int layOutVersion) {
-    this.storageId = storageId;
-    this.clusterId = clusterId;
-    this.datanodeUuid = datanodeUuid;
-    this.cTime = cTime;
-    this.layOutVersion = layOutVersion;
-  }
-
-  private Properties createProperties() {
-    Properties properties = new Properties();
-    properties.setProperty(OzoneConsts.STORAGE_ID, storageId);
-    properties.setProperty(OzoneConsts.CLUSTER_ID, clusterId);
-    properties.setProperty(OzoneConsts.DATANODE_UUID, datanodeUuid);
-    properties.setProperty(OzoneConsts.CTIME, String.valueOf(cTime));
-    properties.setProperty(OzoneConsts.LAYOUTVERSION, String.valueOf(
-        layOutVersion));
-    return properties;
-  }
-
-  /**
-   * Creates a version File in specified path.
-   * @param path
-   * @throws IOException
-   */
-  public void createVersionFile(File path) throws
-      IOException {
-    try (RandomAccessFile file = new RandomAccessFile(path, "rws");
-         FileOutputStream out = new FileOutputStream(file.getFD())) {
-      file.getChannel().truncate(0);
-      Properties properties = createProperties();
-      /*
-       * If server is interrupted before this line,
-       * the version file will remain unchanged.
-       */
-      properties.store(out, null);
-    }
-  }
-
-
-  /**
-   * Creates a property object from the specified file content.
-   * @param  versionFile
-   * @return Properties
-   * @throws IOException
-   */
-  public static Properties readFrom(File versionFile) throws IOException {
-    try (RandomAccessFile file = new RandomAccessFile(versionFile, "rws");
-         FileInputStream in = new FileInputStream(file.getFD())) {
-      Properties props = new Properties();
-      props.load(in);
-      return props;
-    }
-  }
-}
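
For context, a sketch of the write/read round trip the removed DatanodeVersionFile provided; the file location and the ID values are illustrative:

    import java.io.File;
    import java.util.Properties;
    import org.apache.hadoop.ozone.OzoneConsts;
    import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;

    public class DatanodeVersionFileSketch {
      public static void main(String[] args) throws Exception {
        File versionFile = new File("/tmp/dn-meta/VERSION"); // illustrative location
        versionFile.getParentFile().mkdirs();                // createVersionFile expects the parent to exist

        DatanodeVersionFile dnVersion = new DatanodeVersionFile(
            "storage-1", "cluster-1", "datanode-uuid-1", System.currentTimeMillis(), 1);
        dnVersion.createVersionFile(versionFile);            // stores the fields as java.util.Properties

        Properties props = DatanodeVersionFile.readFrom(versionFile);
        System.out.println(props.getProperty(OzoneConsts.CLUSTER_ID));
      }
    }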

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
deleted file mode 100644
index 9d0ec95..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.collect.Maps;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.util.StringUtils;
-
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * A helper class to wrap the info about under deletion container blocks.
- */
-public final class DeletedContainerBlocksSummary {
-
-  private final List<DeletedBlocksTransaction> blocks;
-  // key : txID
-  // value : times of this tx has been processed
-  private final Map<Long, Integer> txSummary;
-  // key : container name
-  // value : the number of blocks need to be deleted in this container
-  // if the message contains multiple entries for same block,
-  // blocks will be merged
-  private final Map<Long, Integer> blockSummary;
-  // total number of blocks in this message
-  private int numOfBlocks;
-
-  private DeletedContainerBlocksSummary(List<DeletedBlocksTransaction> blocks) {
-    this.blocks = blocks;
-    txSummary = Maps.newHashMap();
-    blockSummary = Maps.newHashMap();
-    blocks.forEach(entry -> {
-      txSummary.put(entry.getTxID(), entry.getCount());
-      if (blockSummary.containsKey(entry.getContainerID())) {
-        blockSummary.put(entry.getContainerID(),
-            blockSummary.get(entry.getContainerID())
-                + entry.getLocalIDCount());
-      } else {
-        blockSummary.put(entry.getContainerID(), entry.getLocalIDCount());
-      }
-      numOfBlocks += entry.getLocalIDCount();
-    });
-  }
-
-  public static DeletedContainerBlocksSummary getFrom(
-      List<DeletedBlocksTransaction> blocks) {
-    return new DeletedContainerBlocksSummary(blocks);
-  }
-
-  public int getNumOfBlocks() {
-    return numOfBlocks;
-  }
-
-  public int getNumOfContainers() {
-    return blockSummary.size();
-  }
-
-  public String getTXIDs() {
-    return String.join(",", txSummary.keySet()
-        .stream().map(String::valueOf).collect(Collectors.toList()));
-  }
-
-  public String getTxIDSummary() {
-    List<String> txSummaryEntry = txSummary.entrySet().stream()
-        .map(entry -> entry.getKey() + "(" + entry.getValue() + ")")
-        .collect(Collectors.toList());
-    return "[" + String.join(",", txSummaryEntry) + "]";
-  }
-
-  @Override public String toString() {
-    StringBuffer sb = new StringBuffer();
-    for (DeletedBlocksTransaction blks : blocks) {
-      sb.append(" ")
-          .append("TXID=")
-          .append(blks.getTxID())
-          .append(", ")
-          .append("TimesProceed=")
-          .append(blks.getCount())
-          .append(", ")
-          .append(blks.getContainerID())
-          .append(" : [")
-          .append(StringUtils.join(',', blks.getLocalIDList())).append("]")
-          .append("\n");
-    }
-    return sb.toString();
-  }
-}
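
For context, a sketch of how the removed summary class was fed; the DeletedBlocksTransaction builder methods are assumed from the fields the class reads (txID, count, containerID and the repeated localID), and the numeric values are illustrative:

    import java.util.Collections;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
    import org.apache.hadoop.ozone.container.common.helpers.DeletedContainerBlocksSummary;

    public class DeletedBlocksSummarySketch {
      public static void main(String[] args) {
        DeletedBlocksTransaction tx = DeletedBlocksTransaction.newBuilder()
            .setTxID(7L)
            .setContainerID(42L)
            .addLocalID(1001L)
            .addLocalID(1002L)
            .setCount(0)          // times this transaction has been processed so far
            .build();

        DeletedContainerBlocksSummary summary =
            DeletedContainerBlocksSummary.getFrom(Collections.singletonList(tx));
        // 2 blocks, 1 container, tx summary "[7(0)]"
        System.out.println(summary.getNumOfBlocks() + " " + summary.getNumOfContainers()
            + " " + summary.getTxIDSummary());
      }
    }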

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyValueContainerReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyValueContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyValueContainerReport.java
deleted file mode 100644
index b03487b..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyValueContainerReport.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
-
-import static java.lang.Math.max;
-
-/**
- * KeyValueContainer Report iterates the closed containers and sends a
- * container report to SCM.
- */
-public class KeyValueContainerReport extends ContainerReport{
-  private long deleteTransactionId;
-
-  /**
-   * Constructs the KeyValueContainerReport.
-   *
-   * @param containerID - Container ID.
-   * @param finalhash - Final Hash.
-   */
-  public KeyValueContainerReport(long containerID, String finalhash) {
-    super(containerID, finalhash);
-    this.deleteTransactionId = 0;
-  }
-
-  /**
-   * Sets the deleteTransactionId if it is greater than existing.
-   * @param transactionId - deleteTransactionId
-   */
-  public void updateDeleteTransactionId(long transactionId) {
-    this.deleteTransactionId = max(transactionId, deleteTransactionId);
-  }
-
-  /**
-   * Gets the deleteTransactionId.
-   * @return - deleteTransactionId.
-   */
-  public long getDeleteTransactionId() {
-    return this.deleteTransactionId;
-  }
-
-  /**
-   * Gets a containerReport from protobuf class.
-   *
-   * @param info - ContainerInfo.
-   * @return - ContainerReport.
-   */
-  public static KeyValueContainerReport getFromProtoBuf(ContainerInfo info) {
-    Preconditions.checkNotNull(info);
-    KeyValueContainerReport report = new KeyValueContainerReport(
-        info.getContainerID(), info.getFinalhash());
-    if (info.hasSize()) {
-      report.setSize(info.getSize());
-    }
-    if (info.hasKeyCount()) {
-      report.setKeyCount(info.getKeyCount());
-    }
-    if (info.hasUsed()) {
-      report.setBytesUsed(info.getUsed());
-    }
-    if (info.hasReadCount()) {
-      report.setReadCount(info.getReadCount());
-    }
-    if (info.hasReadBytes()) {
-      report.setReadBytes(info.getReadBytes());
-    }
-    if (info.hasWriteCount()) {
-      report.setWriteCount(info.getWriteCount());
-    }
-    if (info.hasWriteBytes()) {
-      report.setWriteBytes(info.getWriteBytes());
-    }
-    if (info.hasDeleteTransactionId()) {
-      report.updateDeleteTransactionId(info.getDeleteTransactionId());
-    }
-    report.setContainerID(info.getContainerID());
-    return report;
-  }
-
-  /**
-   * Gets a containerInfo protobuf message from ContainerReports.
-   *
-   * @return ContainerInfo
-   */
-  @Override
-  public ContainerInfo getProtoBufMessage() {
-    return ContainerInfo.newBuilder()
-        .setKeyCount(this.getKeyCount())
-        .setSize(this.getSize())
-        .setUsed(this.getBytesUsed())
-        .setReadCount(this.getReadCount())
-        .setReadBytes(this.getReadBytes())
-        .setWriteCount(this.getWriteCount())
-        .setWriteBytes(this.getWriteBytes())
-        .setFinalhash(this.getFinalhash())
-        .setContainerID(this.getContainerID())
-        .setDeleteTransactionId(this.getDeleteTransactionId())
-        .build();
-  }
-}
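
For context, a sketch of the one behaviour KeyValueContainerReport adds over ContainerReport, the monotonic deleteTransactionId; values are illustrative:

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo;
    import org.apache.hadoop.ozone.container.common.helpers.KeyValueContainerReport;

    public class KeyValueContainerReportSketch {
      public static void main(String[] args) {
        KeyValueContainerReport report = new KeyValueContainerReport(42L, "sha256-of-container");
        report.updateDeleteTransactionId(25L);
        report.updateDeleteTransactionId(10L); // ignored: only larger IDs replace the current value
        ContainerInfo info = report.getProtoBufMessage();
        System.out.println(info.getDeleteTransactionId()); // 25
      }
    }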


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
