http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
deleted file mode 100644
index 7be8a62..0000000
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ /dev/null
@@ -1,451 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and Unstable.
- * Please see http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * for what changes are allowed for an *Unstable* .proto interface.
- */
-
-// This file contains protocol buffers that are used to transfer data
-// to and from the datanode.
-syntax = "proto2";
-option java_package = "org.apache.hadoop.hdds.protocol.datanode.proto";
-option java_outer_classname = "ContainerProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdds.datanode;
-
-/**
- * Commands that are used to manipulate the state of containers on a datanode.
- *
- * These commands allow us to work against the datanode - from the
- * Storage Container Manager as well as from clients.
- *
- *  1. CreateContainer - This call is usually made by Storage Container
- *     manager, when we need to create a new container on a given datanode.
- *
- *  2. ReadContainer - Allows end user to stat a container. For example
- *     this allows us to return the metadata of a container.
- *
- *  3. UpdateContainer - Updates container metadata.
-
- *  4. DeleteContainer - This call is made to delete a container.
- *
- *  5. ListContainer - Returns the list of containers on this
- *     datanode. This will be used by tests and tools.
- *
- *  6. PutBlock - Given a valid container, creates a block.
- *
- *  7. GetBlock - Allows user to read the metadata of a block.
- *
- *  8. DeleteBlock - Deletes a given block.
- *
- *  9. ListBlock - Returns a list of blocks that are present inside
- *      a given container.
- *
- *  10. ReadChunk - Allows us to read a chunk.
- *
- *  11. DeleteChunk - Delete an unused chunk.
- *
- *  12. WriteChunk - Allows us to write a chunk
- *
- *  13. ListChunk - Given a Container/Block returns the list of Chunks.
- *
- *  14. CompactChunk - Re-writes a chunk based on Offsets.
- *
- *  15. PutSmallFile - A single RPC that combines both putBlock and WriteChunk.
- *
- *  16. GetSmallFile - A single RPC that combines both getBlock and ReadChunk.
- *
- *  17. CloseContainer - Closes an open container and makes it immutable.
- *
- *  18. CopyContainer - Copies a container from a remote machine.
- */
-
-enum Type {
-  CreateContainer = 1;
-  ReadContainer = 2;
-  UpdateContainer = 3;
-  DeleteContainer = 4;
-  ListContainer = 5;
-
-  PutBlock = 6;
-  GetBlock = 7;
-  DeleteBlock = 8;
-  ListBlock = 9;
-
-  ReadChunk = 10;
-  DeleteChunk = 11;
-  WriteChunk = 12;
-  ListChunk = 13;
-  CompactChunk = 14;
-
-  /** Combines Block and Chunk Operation into Single RPC. */
-  PutSmallFile = 15;
-  GetSmallFile = 16;
-  CloseContainer = 17;
-  GetCommittedBlockLength = 18;
-}
-
-
-enum Result {
-  SUCCESS = 1;
-  UNSUPPORTED_REQUEST = 2;
-  MALFORMED_REQUEST = 3;
-  CONTAINER_INTERNAL_ERROR = 4;
-  INVALID_CONFIG = 5;
-  INVALID_FILE_HASH_FOUND = 6;
-  CONTAINER_EXISTS = 7;
-  NO_SUCH_ALGORITHM = 8;
-  CONTAINER_NOT_FOUND = 9;
-  IO_EXCEPTION = 10;
-  UNABLE_TO_READ_METADATA_DB = 11;
-  NO_SUCH_BLOCK = 12;
-  OVERWRITE_FLAG_REQUIRED = 13;
-  UNABLE_TO_FIND_DATA_DIR = 14;
-  INVALID_WRITE_SIZE = 15;
-  CHECKSUM_MISMATCH = 16;
-  UNABLE_TO_FIND_CHUNK = 17;
-  PROTOC_DECODING_ERROR = 18;
-  INVALID_ARGUMENT = 19;
-  PUT_SMALL_FILE_ERROR = 20;
-  GET_SMALL_FILE_ERROR = 21;
-  CLOSED_CONTAINER_IO = 22;
-  ERROR_CONTAINER_NOT_EMPTY = 23;
-  ERROR_IN_COMPACT_DB = 24;
-  UNCLOSED_CONTAINER_IO = 25;
-  DELETE_ON_OPEN_CONTAINER = 26;
-  CLOSED_CONTAINER_RETRY = 27;
-  INVALID_CONTAINER_STATE = 28;
-  DISK_OUT_OF_SPACE = 29;
-  CONTAINER_ALREADY_EXISTS = 30;
-  CONTAINER_METADATA_ERROR = 31;
-  CONTAINER_FILES_CREATE_ERROR = 32;
-  CONTAINER_CHECKSUM_ERROR = 33;
-  UNKNOWN_CONTAINER_TYPE = 34;
-  BLOCK_NOT_COMMITTED = 35;
-}
-
-/**
- * Block ID that uniquely identifies a block in the Datanode.
- */
-message DatanodeBlockID {
-  required int64 containerID = 1;
-  required int64 localID = 2;
-}
-
-message KeyValue {
-  required string key = 1;
-  optional string value = 2;
-}
-
-/**
- * Lifecycle states of a container in Datanode.
- */
-enum ContainerLifeCycleState {
-    OPEN = 1;
-    CLOSING = 2;
-    CLOSED = 3;
-    INVALID = 4;
-}
-
-message ContainerCommandRequestProto {
-  required   Type cmdType = 1; // Type of the command
-
-  // A string that identifies this command. We generate the Trace ID in the
-  // Ozone frontend, which allows us to trace that command all over Ozone.
-  optional   string traceID = 2;
-
-  required   int64 containerID = 3;
-  required   string datanodeUuid = 4;
-
-  // One of the following commands is available when the corresponding
-  // cmdType is set. At the protocol level we allow only
-  // one command in each packet.
-  // TODO : Upgrade to Protobuf 2.6 or later.
-  optional   CreateContainerRequestProto createContainer = 5;
-  optional   ReadContainerRequestProto readContainer = 6;
-  optional   UpdateContainerRequestProto updateContainer = 7;
-  optional   DeleteContainerRequestProto deleteContainer = 8;
-  optional   ListContainerRequestProto listContainer = 9;
-  optional   CloseContainerRequestProto closeContainer = 10;
-
-  optional   PutBlockRequestProto putBlock = 11;
-  optional   GetBlockRequestProto getBlock = 12;
-  optional   DeleteBlockRequestProto deleteBlock = 13;
-  optional   ListBlockRequestProto listBlock = 14;
-
-  optional   ReadChunkRequestProto readChunk = 15;
-  optional   WriteChunkRequestProto writeChunk = 16;
-  optional   DeleteChunkRequestProto deleteChunk = 17;
-  optional   ListChunkRequestProto listChunk = 18;
-
-  optional   PutSmallFileRequestProto putSmallFile = 19;
-  optional   GetSmallFileRequestProto getSmallFile = 20;
-
-  optional   GetCommittedBlockLengthRequestProto getCommittedBlockLength = 21;
-}
-
-message ContainerCommandResponseProto {
-  required   Type cmdType = 1;
-  optional   string traceID = 2;
-
-  required   Result result = 3;
-  optional   string message = 4;
-
-  optional   CreateContainerResponseProto createContainer = 5;
-  optional   ReadContainerResponseProto readContainer = 6;
-  optional   UpdateContainerResponseProto updateContainer = 7;
-  optional   DeleteContainerResponseProto deleteContainer = 8;
-  optional   ListContainerResponseProto listContainer = 9;
-  optional   CloseContainerResponseProto closeContainer = 10;
-
-  optional   PutBlockResponseProto putBlock = 11;
-  optional   GetBlockResponseProto getBlock = 12;
-  optional   DeleteBlockResponseProto deleteBlock = 13;
-  optional   ListBlockResponseProto listBlock = 14;
-
-  optional   WriteChunkResponseProto writeChunk = 15;
-  optional   ReadChunkResponseProto readChunk = 16;
-  optional   DeleteChunkResponseProto deleteChunk = 17;
-  optional   ListChunkResponseProto listChunk = 18;
-
-  optional   PutSmallFileResponseProto putSmallFile = 19;
-  optional   GetSmallFileResponseProto getSmallFile = 20;
-
-  optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 21;
-}
-
-message ContainerData {
-  required int64 containerID = 1;
-  repeated KeyValue metadata = 2;
-  optional string containerPath = 4;
-  optional int64 bytesUsed = 6;
-  optional int64 size = 7;
-  optional int64 blockCount = 8;
-  optional ContainerLifeCycleState state = 9 [default = OPEN];
-  optional ContainerType containerType = 10 [default = KeyValueContainer];
-}
-
-enum ContainerType {
-  KeyValueContainer = 1;
-}
-
-
-// Container Messages.
-message  CreateContainerRequestProto {
-  repeated KeyValue metadata = 2;
-  optional ContainerType containerType = 3 [default = KeyValueContainer];
-}
-
-message  CreateContainerResponseProto {
-}
-
-message  ReadContainerRequestProto {
-}
-
-message  ReadContainerResponseProto {
-  optional ContainerData containerData = 1;
-}
-
-message  UpdateContainerRequestProto {
-  repeated KeyValue metadata = 2;
-  optional bool forceUpdate = 3 [default = false];
-}
-
-message  UpdateContainerResponseProto {
-}
-
-message  DeleteContainerRequestProto {
-  optional bool forceDelete = 2 [default = false];
-}
-
-message  DeleteContainerResponseProto {
-}
-
-message  ListContainerRequestProto {
-  optional uint32 count = 2; // Max Results to return
-}
-
-message  ListContainerResponseProto {
-  repeated ContainerData containerData = 1;
-}
-
-message CloseContainerRequestProto {
-}
-
-message CloseContainerResponseProto {
-  optional string hash = 1;
-  optional int64 containerID = 2;
-}
-
-message BlockData {
-  required DatanodeBlockID blockID = 1;
-  optional int64 flags = 2; // for future use.
-  repeated KeyValue metadata = 3;
-  repeated ChunkInfo chunks = 4;
-  optional int64 size = 5;
-}
-
-// Block Messages.
-message  PutBlockRequestProto {
-  required BlockData blockData = 1;
-}
-
-message  PutBlockResponseProto {
-  required GetCommittedBlockLengthResponseProto committedBlockLength = 1;
-}
-
-message  GetBlockRequestProto  {
-  required DatanodeBlockID blockID = 1;
-}
-
-message  GetBlockResponseProto  {
-  required BlockData blockData = 1;
-}
-
-
-message  DeleteBlockRequestProto {
-  required DatanodeBlockID blockID = 1;
-}
-
-message  GetCommittedBlockLengthRequestProto {
-  required DatanodeBlockID blockID = 1;
-}
-
-message  GetCommittedBlockLengthResponseProto {
-  required DatanodeBlockID blockID = 1;
-  required int64 blockLength = 2;
-}
-
-message   DeleteBlockResponseProto {
-}
-
-message  ListBlockRequestProto {
-  optional int64 startLocalID = 2;
-  required uint32 count = 3;
-
-}
-
-message  ListBlockResponseProto {
-  repeated BlockData blockData = 1;
-}
-
-// Chunk Operations
-
-message ChunkInfo {
-  required string chunkName = 1;
-  required uint64 offset = 2;
-  required uint64 len = 3;
-  optional string checksum = 4;
-  repeated KeyValue metadata = 5;
-}
-
-enum Stage {
-    WRITE_DATA = 1;
-    COMMIT_DATA = 2;
-    COMBINED = 3;
-}
-
-message  WriteChunkRequestProto  {
-  required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
-  optional bytes data = 3;
-  optional Stage stage = 4 [default = COMBINED];
-}
-
-message  WriteChunkResponseProto {
-}
-
-message  ReadChunkRequestProto  {
-  required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
-}
-
-message  ReadChunkResponseProto {
-  required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
-  required bytes data = 3;
-}
-
-message  DeleteChunkRequestProto {
-  required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
-}
-
-message  DeleteChunkResponseProto {
-}
-
-message  ListChunkRequestProto {
-  required DatanodeBlockID blockID = 1;
-  required string prevChunkName = 2;
-  required uint32 count = 3;
-}
-
-message  ListChunkResponseProto {
-  repeated ChunkInfo chunkData = 1;
-}
-
-/** For small file access, this combines WriteChunk and PutBlock into a
-single RPC. */
-
-message PutSmallFileRequestProto {
-  required PutBlockRequestProto block = 1;
-  required ChunkInfo chunkInfo = 2;
-  required bytes data = 3;
-}
-
-
-message PutSmallFileResponseProto {
-
-}
-
-message GetSmallFileRequestProto {
-  required GetBlockRequestProto block = 1;
-}
-
-message GetSmallFileResponseProto {
-  required ReadChunkResponseProto data = 1;
-}
-
-message CopyContainerRequestProto {
-  required int64 containerID = 1;
-  required uint64 readOffset = 2;
-  optional uint64 len = 3;
-}
-
-message CopyContainerResponseProto {
-  required int64 containerID = 1;
-  required uint64 readOffset = 2;
-  required uint64 len = 3;
-  required bool eof = 4;
-  required bytes data = 5;
-  optional int64 checksum = 6;
-}
-
-service XceiverClientProtocolService {
-  // A client-to-datanode RPC to send container commands
-  rpc send(stream ContainerCommandRequestProto) returns
-    (stream ContainerCommandResponseProto) {};
-
-}
-
-service IntraDatanodeProtocolService {
-  // An intra-datanode service to copy the raw container data between nodes
-  rpc download (CopyContainerRequestProto) returns (stream CopyContainerResponseProto);
-}
\ No newline at end of file
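
For orientation, here is a minimal sketch of how a client could build one of the
messages removed above, assuming the standard protobuf-java classes generated from
this file (the java_package and java_outer_classname come from the options at the
top of the file; the wrapper class and method names are hypothetical):

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

public class CreateContainerExample {
  // Builds a CreateContainer command; exactly one command message is set,
  // matching the cmdType field, since only one command is allowed per packet.
  public static ContainerProtos.ContainerCommandRequestProto buildRequest(
      long containerId, String datanodeUuid) {
    return ContainerProtos.ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.CreateContainer)
        .setContainerID(containerId)
        .setDatanodeUuid(datanodeUuid)
        .setCreateContainer(
            ContainerProtos.CreateContainerRequestProto.newBuilder()
                .setContainerType(ContainerProtos.ContainerType.KeyValueContainer)
                .build())
        .build();
  }
}

A message built this way would travel over the XceiverClientProtocolService.send
streaming RPC defined above.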

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
deleted file mode 100644
index 9b4e0ac..0000000
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-option java_outer_classname = "ScmBlockLocationProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdds;
-
-import "hdfs.proto";
-import "hdds.proto";
-
-
-// SCM Block protocol
-
-/**
-* Request sent to SCM asking it to allocate a block of the specified size.
-*/
-message AllocateScmBlockRequestProto {
-  required uint64 size = 1;
-  required ReplicationType type = 2;
-  required hadoop.hdds.ReplicationFactor factor = 3;
-  required string owner = 4;
-
-}
-
-/**
- * A delete key request sent by OM to SCM; it contains
- * multiple keys (and their blocks).
- */
-message DeleteScmKeyBlocksRequestProto {
-  repeated KeyBlocks keyBlocks = 1;
-}
-
-/**
- * An object key and all its associated blocks.
- * We need to encapsulate the object key name plus the blocks in this protocol
- * because SCM needs to respond to OM with the keys it has deleted.
- * If the response only contains blocks, it will be very expensive for
- * OM to figure out what keys have been deleted.
- */
-message KeyBlocks {
-  required string key = 1;
-  repeated BlockID blocks = 2;
-}
-
-/**
- * A delete key response from SCM to OM; it contains multiple child-results.
- * Each child-result represents a key deletion result; a key result is
- * considered successful only if all blocks of the key are successfully deleted.
- */
-message DeleteScmKeyBlocksResponseProto {
-  repeated DeleteKeyBlocksResultProto results = 1;
-}
-
-/**
- * A key deletion result. It contains all the block deletion results.
- */
-message DeleteKeyBlocksResultProto {
-  required string objectKey = 1;
-  repeated DeleteScmBlockResult blockResults = 2;
-}
-
-message DeleteScmBlockResult {
-  enum Result {
-    success = 1;
-    chillMode = 2;
-    errorNotFound = 3;
-    unknownFailure = 4;
-  }
-  required Result result = 1;
-  required BlockID blockID = 2;
-}
-
-/**
- * Reply from SCM for a block allocation request.
- */
-message AllocateScmBlockResponseProto {
-  enum Error {
-    success = 1;
-    errorNotEnoughSpace = 2;
-    errorSizeTooBig = 3;
-    unknownFailure = 4;
-  }
-  required Error errorCode = 1;
-  optional BlockID blockID = 2;
-  optional hadoop.hdds.Pipeline pipeline = 3;
-  optional bool createContainer = 4;
-  optional string errorMessage = 5;
-}
-
-/**
- * Protocol used from OzoneManager to StorageContainerManager.
- * See request and response messages for details of the RPC calls.
- */
-service ScmBlockLocationProtocolService {
-
-  /**
-   * Creates a block entry in SCM.
-   */
-  rpc allocateScmBlock(AllocateScmBlockRequestProto)
-      returns (AllocateScmBlockResponseProto);
-
-  /**
-   * Deletes blocks for a set of object keys from SCM.
-   */
-  rpc deleteScmKeyBlocks(DeleteScmKeyBlocksRequestProto)
-      returns (DeleteScmKeyBlocksResponseProto);
-
-  /**
-   * Gets the scmInfo from SCM.
-   */
-  rpc getScmInfo(hadoop.hdds.GetScmInfoRequestProto)
-      returns (hadoop.hdds.GetScmInfoRespsonseProto);
-}
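
As a sketch of how OM might use the removed AllocateScmBlockRequestProto, assuming
the standard protobuf-java classes generated from this file and from hdds.proto
(the wrapper class and parameter names are hypothetical):

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;

public class AllocateBlockExample {
  // Asks SCM for a block of the given size, replicated three ways via Ratis.
  public static ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto
      buildRequest(long sizeInBytes, String owner) {
    return ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto.newBuilder()
        .setSize(sizeInBytes)
        .setType(HddsProtos.ReplicationType.RATIS)
        .setFactor(HddsProtos.ReplicationFactor.THREE)
        .setOwner(owner)
        .build();
  }
}

The response carries an error code, the allocated BlockID and the pipeline to
write to, per AllocateScmBlockResponseProto above.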

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
deleted file mode 100644
index fb01d6a..0000000
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-option java_outer_classname = "StorageContainerLocationProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdds;
-
-import "hdfs.proto";
-import "hdds.proto";
-
-/**
-* Request sent to SCM asking where the container should be created.
-*/
-message ContainerRequestProto {
-  // Ozone only supports replication factors of 1 or 3.
-  required ReplicationFactor replicationFactor = 2;
-  required ReplicationType  replicationType = 3;
-  required string owner = 4;
-
-}
-
-/**
- * Reply from SCM for a container allocation request.
- */
-message ContainerResponseProto {
-  enum Error {
-    success = 1;
-    errorContainerAlreadyExists = 2;
-    errorContainerMissing = 3;
-  }
-  required Error errorCode = 1;
-  required ContainerWithPipeline containerWithPipeline = 2;
-  optional string errorMessage = 3;
-}
-
-message GetContainerRequestProto {
-  required int64 containerID = 1;
-}
-
-message GetContainerResponseProto {
-  required SCMContainerInfo containerInfo = 1;
-}
-
-message GetContainerWithPipelineRequestProto {
-  required int64 containerID = 1;
-}
-
-message GetContainerWithPipelineResponseProto {
-  required ContainerWithPipeline containerWithPipeline = 1;
-}
-
-message SCMListContainerRequestProto {
-  required uint32 count = 1;
-  optional uint64 startContainerID = 2;
- }
-
-message SCMListContainerResponseProto {
-  repeated SCMContainerInfo containers = 1;
-}
-
-message SCMDeleteContainerRequestProto {
-  required int64 containerID = 1;
-}
-
-message SCMDeleteContainerResponseProto {
-  // Empty response
-}
-
-message ObjectStageChangeRequestProto {
-  enum Type {
-    container = 1;
-    pipeline = 2;
-  }
-  // delete/copy operation may be added later
-  enum Op {
-    create = 1;
-    close = 2;
-  }
-  enum Stage {
-    begin = 1;
-    complete = 2;
-  }
-  required int64 id = 1;
-  required Type type = 2;
-  required Op op= 3;
-  required Stage stage = 4;
-}
-
-message ObjectStageChangeResponseProto {
-  // Empty response
-}
-
-/*
- NodeQueryRequest sends a request to SCM asking to send a list of nodes that
- match the NodeState that we are requesting.
-*/
-message NodeQueryRequestProto {
-  required NodeState state = 1;
-  required QueryScope scope = 2;
-  optional string poolName = 3; // if scope is pool, then pool name is needed.
-}
-
-message NodeQueryResponseProto {
-  repeated Node datanodes = 1;
-}
-
-/**
-  Request to create a replication pipeline.
- */
-message PipelineRequestProto {
-  required ReplicationType replicationType = 1;
-  required ReplicationFactor replicationFactor = 2;
-
-  // if datanodes are specified then pipelines are created using those
-  // datanodes.
-  optional NodePool nodePool = 3;
-  optional string pipelineID = 4;
-}
-
-message  PipelineResponseProto {
-  enum Error {
-    success = 1;
-    errorPipelineAlreadyExists = 2;
-  }
-  required Error errorCode = 1;
-  optional Pipeline  pipeline = 2;
-  optional string errorMessage = 3;
-}
-
-
-message InChillModeRequestProto {
-}
-
-message InChillModeResponseProto {
-  required bool inChillMode = 1;
-}
-
-message ForceExitChillModeRequestProto {
-}
-
-message ForceExitChillModeResponseProto {
-  required bool exitedChillMode = 1;
-}
-
-/**
- * Protocol used from an HDFS node to StorageContainerManager.  See the request
- * and response messages for details of the RPC calls.
- */
-service StorageContainerLocationProtocolService {
-
-  /**
-   * Creates a container entry in SCM.
-   */
-  rpc allocateContainer(ContainerRequestProto) returns (ContainerResponseProto);
-
-  /**
-   * Returns the container information for a given container ID.
-   */
-  rpc getContainer(GetContainerRequestProto) returns (GetContainerResponseProto);
-
-  /**
-   * Returns the container and its pipeline for a given container ID.
-   */
-  rpc getContainerWithPipeline(GetContainerWithPipelineRequestProto) returns (GetContainerWithPipelineResponseProto);
-
-  rpc listContainer(SCMListContainerRequestProto) returns (SCMListContainerResponseProto);
-
-  /**
-   * Deletes a container in SCM.
-   */
-  rpc deleteContainer(SCMDeleteContainerRequestProto) returns (SCMDeleteContainerResponseProto);
-
-  /**
-  * Returns a set of Nodes that meet the given criteria.
-  */
-  rpc queryNode(NodeQueryRequestProto) returns (NodeQueryResponseProto);
-
-  /**
-  * Notification from the client when it begins or finishes container or
-  * pipeline operations on datanodes.
-  */
-  rpc notifyObjectStageChange(ObjectStageChangeRequestProto) returns (ObjectStageChangeResponseProto);
-
-  /*
-  *  APIs that manage Pipelines.
-  *
-  * Pipelines are abstractions offered by SCM and Datanode that allow users
-  * to create a replication pipeline.
-  *
-  * The following APIs allow command line programs like the SCM CLI to list
-  * and manage pipelines.
-  */
-
-  /**
-  *  Creates a replication pipeline.
-  */
-  rpc allocatePipeline(PipelineRequestProto)
-      returns (PipelineResponseProto);
-
-  /**
-  *  Returns information about SCM.
-  */
-  rpc getScmInfo(GetScmInfoRequestProto)
-      returns (GetScmInfoRespsonseProto);
-
-  /**
-  *  Checks if SCM is in ChillMode.
-  */
-  rpc inChillMode(InChillModeRequestProto)
-  returns (InChillModeResponseProto);
-
-  /**
-  *  Forces SCM to exit ChillMode.
-  */
-  rpc forceExitChillMode(ForceExitChillModeRequestProto)
-  returns (ForceExitChillModeResponseProto);
-}
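
A similar sketch for the removed ContainerRequestProto, again assuming the standard
protobuf-java classes generated from this file and from hdds.proto (the wrapper
class and parameter names are hypothetical):

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos;

public class AllocateContainerExample {
  // Asks SCM to allocate a container on a three-node Ratis pipeline.
  public static StorageContainerLocationProtocolProtos.ContainerRequestProto
      buildRequest(String owner) {
    return StorageContainerLocationProtocolProtos.ContainerRequestProto.newBuilder()
        .setReplicationFactor(HddsProtos.ReplicationFactor.THREE)
        .setReplicationType(HddsProtos.ReplicationType.RATIS)
        .setOwner(owner)
        .build();
  }
}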

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
deleted file mode 100644
index dedc57b..0000000
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for an *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.hdds.protocol.proto";
-option java_outer_classname = "HddsProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdds;
-
-message DatanodeDetailsProto {
-    required string uuid = 1;  // UUID assigned to the Datanode.
-    required string ipAddress = 2;     // IP address
-    required string hostName = 3;      // hostname
-    repeated Port ports = 4;
-}
-
-message Port {
-    required string name = 1;
-    required uint32 value = 2;
-}
-
-message PipelineID {
-  required string id = 1;
-}
-
-message Pipeline {
-    required string leaderID = 1;
-    repeated DatanodeDetailsProto members = 2;
-    optional LifeCycleState state = 3 [default = OPEN];
-    optional ReplicationType type = 4 [default = STAND_ALONE];
-    optional ReplicationFactor factor = 5 [default = ONE];
-    required PipelineID id = 6;
-}
-
-message KeyValue {
-    required string key = 1;
-    optional string value = 2;
-}
-
-/**
- * Type of the node.
- */
-enum NodeType {
-    OM = 1;         // Ozone Manager
-    SCM = 2;        // Storage Container Manager
-    DATANODE = 3;   // DataNode
-}
-
-// Should we rename NodeState to DatanodeState?
-/**
- * Enum that represents the Node State. This is used in calls to getNodeList
- * and getNodeCount.
- */
-enum NodeState {
-    HEALTHY = 1;
-    STALE = 2;
-    DEAD = 3;
-    DECOMMISSIONING = 4;
-    DECOMMISSIONED = 5;
-}
-
-enum QueryScope {
-    CLUSTER = 1;
-    POOL = 2;
-}
-
-message Node {
-    required DatanodeDetailsProto nodeID = 1;
-    repeated NodeState nodeStates = 2;
-}
-
-message NodePool {
-    repeated Node nodes = 1;
-}
-
-/**
- * LifeCycleState for SCM object creation state machine:
- *    ->Allocated: allocated on SCM but the client has not started creating it yet.
- *    ->Creating: allocated and assigned to a client to create, but not ack-ed yet.
- *    ->Open: allocated on SCM, created on datanodes and ack-ed by a client.
- *    ->Close: container closed because its space is fully used or due to an error.
- *    ->Timeout: container creation on datanodes failed or was not ack-ed by the client in time.
- *    ->Deleting(TBD): container will be deleted after a timeout.
- * 1. ALLOCATE-ed containers on SCM can't serve key/block related operations
- *    until ACK-ed explicitly, which changes the state to OPEN.
- * 2. Only OPEN/CLOSED containers can serve key/block related operations.
- * 3. ALLOCATE-ed containers that are not ACK-ed in time will be TIMEOUT-ed and
- *    CLEANUP-ed asynchronously.
- */
-
-enum LifeCycleState {
-    ALLOCATED = 1;
-    CREATING = 2; // Used for container allocated/created by different client.
-    OPEN =3; // Mostly an update to SCM via HB or client call.
-    CLOSING = 4;
-    CLOSED = 5; // !!State after this has not been used yet.
-    DELETING = 6;
-    DELETED = 7; // object is deleted.
-}
-
-enum LifeCycleEvent {
-    CREATE = 1; // A request to client to create this object
-    CREATED = 2;
-    FINALIZE = 3;
-    CLOSE = 4; // !!Event after this has not been used yet.
-    UPDATE = 5;
-    TIMEOUT = 6; // creation has timed out from SCM's View.
-    DELETE = 7;
-    CLEANUP = 8;
-}
-
-message SCMContainerInfo {
-    required int64 containerID = 1;
-    required LifeCycleState state = 2;
-    optional PipelineID pipelineID = 3;
-    // This is not the total size of the container, but the space allocated by
-    // SCM for clients to write blocks
-    required uint64 allocatedBytes = 4;
-    required uint64 usedBytes = 5;
-    required uint64 numberOfKeys = 6;
-    optional int64 stateEnterTime = 7;
-    required string owner = 8;
-    optional int64 deleteTransactionId = 9;
-    required ReplicationFactor replicationFactor  = 10;
-    required ReplicationType replicationType  = 11;
-}
-
-message ContainerWithPipeline {
-  required SCMContainerInfo containerInfo = 1;
-  required Pipeline pipeline = 2;
-}
-
-message GetScmInfoRequestProto {
-}
-
-message GetScmInfoRespsonseProto {
-    required string clusterId = 1;
-    required string scmId = 2;
-}
-
-
-enum ReplicationType {
-    RATIS = 1;
-    STAND_ALONE = 2;
-    CHAINED = 3;
-}
-
-enum ReplicationFactor {
-    ONE = 1;
-    THREE = 3;
-}
-
-enum ScmOps {
-    allocateBlock = 1;
-    keyBlocksInfoList = 2;
-    getScmInfo = 3;
-    deleteBlock = 4;
-    createReplicationPipeline = 5;
-    allocateContainer = 6;
-    getContainer = 7;
-    getContainerWithPipeline = 8;
-    listContainer = 9;
-    deleteContainer = 10;
-    queryNode = 11;
-}
-
-/**
- * Block ID that uniquely identifies a block in SCM.
- */
-message BlockID {
-    required int64 containerID = 1;
-    required int64 localID = 2;
-}
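
A small sketch of the removed BlockID message, assuming the standard protobuf-java
classes generated from this file (outer class HddsProtos, as declared above; the
wrapper class is hypothetical):

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public class BlockIdExample {
  // A block is addressed by the container that holds it plus an ID that is
  // local to that container.
  public static HddsProtos.BlockID buildBlockId(long containerId, long localId) {
    return HddsProtos.BlockID.newBuilder()
        .setContainerID(containerId)
        .setLocalID(localId)
        .build();
  }
}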

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties b/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
deleted file mode 100644
index 2cbd817..0000000
--- a/hadoop-hdds/common/src/main/resources/hdds-version-info.properties
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-version=${declared.hdds.version}
-revision=${version-info.scm.commit}
-branch=${version-info.scm.branch}
-user=${user.name}
-date=${version-info.build.time}
-url=${version-info.scm.uri}
-srcChecksum=${version-info.source.md5}
-protocVersion=${protobuf.version}
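
The ${...} placeholders above are filled in at build time (Maven resource
filtering). At runtime the filtered file can be read as a plain classpath
resource; a minimal sketch (the class name is hypothetical, the resource name is
the file name shown above):

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class HddsVersionInfoExample {
  // Loads the filtered version-info properties from the classpath.
  public static Properties loadVersionInfo() throws IOException {
    Properties props = new Properties();
    try (InputStream in = HddsVersionInfoExample.class.getClassLoader()
        .getResourceAsStream("hdds-version-info.properties")) {
      if (in != null) {
        props.load(in);
      }
    }
    return props; // e.g. props.getProperty("version"), props.getProperty("revision")
  }
}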

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
deleted file mode 100644
index b7c967d..0000000
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ /dev/null
@@ -1,1299 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Do not modify this file directly.  Instead, copy entries that you -->
-<!-- wish to modify from this file into ozone-site.xml and change them -->
-<!-- there.  If ozone-site.xml does not already exist, create it.      -->
-
-<!--Tags supported are OZONE, CBLOCK, MANAGEMENT, SECURITY, PERFORMANCE,   -->
-<!--DEBUG, CLIENT, SERVER, OM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
-<!--REST, STORAGE, PIPELINE, STANDALONE                                    -->
-
-<configuration>
-
-  <!--Container Settings used by Datanode-->
-  <property>
-    <name>ozone.container.cache.size</name>
-    <value>1024</value>
-    <tag>PERFORMANCE, CONTAINER, STORAGE</tag>
-    <description>The open container is cached on the data node side. We maintain
-      an LRU
-      cache for caching the recently used containers. This setting controls the
-      size of that cache.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ipc</name>
-    <value>9859</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>The ipc port number of container.</description>
-  </property>
-  <property>
-    <name>dfs.container.ipc.random.port</name>
-    <value>false</value>
-    <tag>OZONE, DEBUG, CONTAINER</tag>
-    <description>Allocates a random free port for ozone container. This is used
-      only while
-      running unit tests.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.datanode.storage.dir</name>
-    <value/>
-    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS</tag>
-    <description>This directory is used for storing Ratis metadata like logs. If
-      this is
-      not set then the default metadata dir is used. A warning will be logged if
-      this is not set. Ideally, this should be mapped to a fast disk like an SSD.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.dir</name>
-    <value/>
-    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
-    <description>Determines where on the local filesystem HDDS data will be
-      stored. Defaults to dfs.datanode.data.dir if not specified.
-      The directories should be tagged with corresponding storage types
-      ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for storage policies. The default
-      storage type will be DISK if the directory does not have a storage type
-      tagged explicitly.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.volume.choosing.policy</name>
-    <value/>
-    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
-    <description>
-      The class name of the policy for choosing volumes in the list of
-      directories.  Defaults to
-      org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy.
-      This volume choosing policy selects volumes in a round-robin order.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.enabled</name>
-    <value>false</value>
-    <tag>OZONE, MANAGEMENT, PIPELINE, RATIS</tag>
-    <description>Ozone supports different kinds of replication pipelines. Ratis
-      is one of
-      the replication pipelines supported by ozone.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.ipc</name>
-    <value>9858</value>
-    <tag>OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT</tag>
-    <description>The ipc port number of container.</description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.ipc.random.port</name>
-    <value>false</value>
-    <tag>OZONE,DEBUG</tag>
-    <description>Allocates a random free port for ozone ratis port for the
-      container. This
-      is used only while running unit tests.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.rpc.type</name>
-    <value>GRPC</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Ratis supports different kinds of transports like netty, GRPC,
-      Hadoop RPC
-      etc. This picks one of those for this cluster.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.num.write.chunk.threads</name>
-    <value>60</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>Maximum number of threads in the thread pool that Ratis
-      will use for writing chunks (60 by default).
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.replication.level</name>
-    <value>MAJORITY</value>
-    <tag>OZONE, RATIS</tag>
-    <description>Replication level to be used by datanode for submitting a
-      container command to ratis. Available replication levels are ALL and
-      MAJORITY; MAJORITY is used as the default replication level.
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.segment.size</name>
-    <value>1073741824</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>The size of the raft segment used by Apache Ratis on datanodes.
-      (1 GB by default)
-    </description>
-  </property>
-  <property>
-    <name>dfs.container.ratis.segment.preallocated.size</name>
-    <value>134217728</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>The size of the buffer which is preallocated for raft segment
-      used by Apache Ratis on datanodes.(128 MB by default)
-    </description>
-  </property>
-  <property>
-    <name>dfs.ratis.client.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis client request.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.client.request.max.retries</name>
-    <value>180</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Number of retries for ratis client request.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.client.request.retry.interval</name>
-    <value>100ms</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Interval between successive retries for a ratis client request.
-    </description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.retry-cache.timeout.duration</name>
-    <value>600000ms</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>Retry Cache entry timeout for ratis server.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server request.</description>
-  </property>
-  <property>
-    <name>dfs.ratis.leader.election.minimum.timeout.duration</name>
-    <value>1s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The minimum timeout duration for ratis leader election.
-        Default is 1s.
-    </description>
-  </property>
-  <property>
-    <name>dfs.ratis.server.failure.duration</name>
-    <value>120s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server failure detection,
-      once the threshold has reached, the ratis state machine will be informed
-      about the failure in the ratis ring
-    </description>
-  </property>
-  <property>
-    <name>hdds.node.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send node report. Each
-      datanode periodically sends a node report to SCM. Unit could be
-      defined with postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>hdds.container.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send container report. Each
-      datanode periodically sends a container report to SCM. Unit could be
-      defined with postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>hdds.command.status.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, CONTAINER, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send status of command
-      execution. Each datanode periodically reports the execution status of commands
-      received from SCM to SCM. Unit could be defined with postfix
-      (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>hdds.pipeline.report.interval</name>
-    <value>60000ms</value>
-    <tag>OZONE, PIPELINE, MANAGEMENT</tag>
-    <description>Time interval of the datanode to send pipeline report. Each
-      datanode periodically sends a pipeline report to SCM. Unit could be
-      defined with postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <!--Ozone Settings-->
-  <property>
-    <name>ozone.administrators</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>Ozone administrator users delimited by commas.
-      If not set, only the user who launches an ozone service will be the admin
-      user. This property must be set if ozone services are started by different
-      users. Otherwise, the RPC layer will reject calls from other servers which
-      are started by users not in the list.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.container.limit.per.interval</name>
-    <value>10</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>A maximum number of containers to be scanned by block deleting
-      service per
-      time interval. The block deleting service spawns a thread to handle block
-      deletions in a container. This property is used to throttle the number of
-      threads spawned for block deletions.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.limit.per.task</name>
-    <value>1000</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>A maximum number of blocks to be deleted by block deleting
-      service per
-      time interval. This property is used to throttle the actual number of
-      block deletions on a data node per container.
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.service.interval</name>
-    <value>1m</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>Time interval of the block deleting service.
-      The block deleting service runs on each datanode periodically and
-      deletes blocks queued for deletion. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)
-    </description>
-  </property>
-  <property>
-    <name>ozone.block.deleting.service.timeout</name>
-    <value>300000ms</value>
-    <tag>OZONE, PERFORMANCE, SCM</tag>
-    <description>A timeout value of block deletion service. If this is set
-      greater than 0,
-      the service will stop waiting for the block deleting completion after this
-      time. If timeout happens to a large proportion of block deletion, this
-      needs to be increased with ozone.block.deleting.limit.per.task. This
-      setting supports multiple time unit suffixes as described in
-      dfs.heartbeat.interval. If no suffix is specified, then milliseconds is
-      assumed.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.connection.timeout</name>
-    <value>5000ms</value>
-    <tag>OZONE, PERFORMANCE, CLIENT</tag>
-    <description>Connection timeout for Ozone client in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.max.retries</name>
-    <value>50</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Maximum number of retries by Ozone Client on encountering
-      exception while fetching committed block length.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.retry.interval</name>
-    <value>200ms</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Interval between retries by Ozone Client on encountering
-      exception while fetching committed block length.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.protocol</name>
-    <value>org.apache.hadoop.ozone.client.rpc.RpcClient</value>
-    <tag>OZONE, CLIENT, MANAGEMENT</tag>
-    <description>Protocol class to be used by the client to connect to ozone
-      cluster.
-      The built-in implementations include:
-      org.apache.hadoop.ozone.client.rpc.RpcClient for RPC
-      org.apache.hadoop.ozone.client.rest.RestClient for REST
-      The default is the RpcClient. Please do not change this unless you have a
-      very good understanding of what you are doing.
-    </description>
-  </property>
-  <property>
-    <name>ozone.client.socket.timeout</name>
-    <value>5000ms</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Socket timeout for Ozone client. Unit could be defined with
-      postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>ozone.enabled</name>
-    <value>false</value>
-    <tag>OZONE, REQUIRED</tag>
-    <description>
-      Controls whether the Ozone Object Storage service is enabled.
-      Set to true to enable Ozone.
-      Set to false to disable Ozone.
-      Unless this value is set to true, Ozone services will not be started in
-      the cluster.
-
-      Please note: By default ozone is disabled on a hadoop cluster.
-    </description>
-  </property>
-  <property>
-    <name>ozone.key.deleting.limit.per.task</name>
-    <value>1000</value>
-    <tag>OM, PERFORMANCE</tag>
-    <description>
-      A maximum number of keys to be scanned by key deleting service
-      per time interval in OM. Those keys are sent to delete metadata and
-      generate transactions in SCM for next async deletion between SCM
-      and DataNode.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.address</name>
-    <value/>
-    <tag>OM, REQUIRED</tag>
-    <description>
-      The address of the Ozone OM service. This allows clients to discover
-      the address of the OM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.group.rights</name>
-    <value>READ_WRITE</value>
-    <tag>OM, SECURITY</tag>
-    <description>
-      Default group permissions in Ozone OM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.handler.count.key</name>
-    <value>20</value>
-    <tag>OM, PERFORMANCE</tag>
-    <description>
-      The number of RPC handler threads for OM service endpoints.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.http-address</name>
-    <value>0.0.0.0:9874</value>
-    <tag>OM, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the OM web UI will listen on.
-
-      If the port is 0, then the server will start on a free port. However, it
-      is best to specify a well-known port, so it is easy to connect and see
-      the OM management UI.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OM, MANAGEMENT</tag>
-    <description>
-      The actual address the OM web server will bind to. If this optional
-      address is set, it overrides only the hostname portion of
-      ozone.om.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.http.enabled</name>
-    <value>true</value>
-    <tag>OM, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable OM web user interface.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.https-address</name>
-    <value>0.0.0.0:9875</value>
-    <tag>OM, MANAGEMENT, SECURITY</tag>
-    <description>
-      The address and the base port where the OM web UI will listen
-      on using HTTPS.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OM, MANAGEMENT, SECURITY</tag>
-    <description>
-      The actual address the OM web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion of
-      ozone.om.http-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.keytab.file</name>
-    <value/>
-    <tag>OM, SECURITY</tag>
-    <description>
-      The keytab file for Kerberos authentication in OM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.db.cache.size.mb</name>
-    <value>128</value>
-    <tag>OM, PERFORMANCE</tag>
-    <description>
-      The size of the OM DB cache in MB that is used for caching files.
-      This value is set to an abnormally low value in the default configuration.
-      That is to make unit testing easy. Generally, this value should be set to
-      something like 16GB or more, if you intend to use Ozone at scale.
-
-      A large value for this key allows a proportionally larger amount of OM
-      metadata to be cached in memory. This makes OM operations faster.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.user.max.volume</name>
-    <value>1024</value>
-    <tag>OM, MANAGEMENT</tag>
-    <description>
-      The maximum number of volumes a user can have on a cluster. Increasing or
-      decreasing this number has no real impact on the ozone cluster. This is
-      defined only for operational purposes. Only an administrator can create a
-      volume, once a volume is created there are no restrictions on the number
-      of buckets or keys inside each bucket a user can create.
-    </description>
-  </property>
-  <property>
-    <name>ozone.om.user.rights</name>
-    <value>READ_WRITE</value>
-    <tag>OM, SECURITY</tag>
-    <description>
-      Default user permissions used in OM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.metadata.dirs</name>
-    <value/>
-    <tag>OZONE, OM, SCM, CONTAINER, REQUIRED, STORAGE</tag>
-    <description>
-      Ozone metadata is shared among OM, which acts as the namespace
-      manager for ozone, SCM, which acts as the block manager, and data nodes,
-      which maintain the name of the key (Key Name and BlockIDs). This
-      replicated and distributed metadata store is maintained under the
-      directory pointed by this key. Since metadata can be I/O intensive, at
-      least on OM and SCM we recommend having SSDs. If you have the luxury
-      of mapping this path to SSDs on all machines in the cluster, that will
-      be excellent.
-
-      If Ratis metadata directories are not specified, Ratis server will emit a
-      warning and use this path for storing its metadata too.
-    </description>
-  </property>
-  <property>
-    <name>ozone.metastore.impl</name>
-    <value>RocksDB</value>
-    <tag>OZONE, OM, SCM, CONTAINER, STORAGE</tag>
-    <description>
-      Ozone metadata store implementation. Ozone metadata is
-      distributed across multiple services such as the OzoneManager and SCM. It is stored in
-      local key-value databases. This property determines which database
-      library to use. Supported values are LevelDB and RocksDB.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.metastore.rocksdb.statistics</name>
-    <value>ALL</value>
-    <tag>OZONE, OM, SCM, STORAGE, PERFORMANCE</tag>
-    <description>
-      The statistics level of the rocksdb store. If you use any value from
-      org.rocksdb.StatsLevel (eg. ALL or EXCEPT_DETAILED_TIMERS), the rocksdb
-      statistics will be exposed over a JMX bean with the chosen setting. Set
-      it to OFF to not initialize rocksdb statistics at all. Please note that
-      collection of statistics could have a 5-10% performance penalty.
-      Check the rocksdb documentation for more details.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.scm.block.client.address</name>
-    <value/>
-    <tag>OZONE, SCM</tag>
-    <description>The address of the Ozone SCM block client service. If not
-      defined value of ozone.scm.client.address is used.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.client.bind.host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The hostname or IP address used by the SCM block client
-      endpoint to bind.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.client.port</name>
-    <value>9863</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The port number of the Ozone SCM block client service.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.deletion.max.retry</name>
-    <value>4096</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      SCM wraps up many blocks in a deletion transaction and sends that to data
-      node for physical deletion periodically. This property determines how many
-      times SCM is going to retry sending a deletion operation to the data node.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.block.size.in.mb</name>
-    <value>256</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      The default size of an SCM block in MB. This maps to the default
-      Ozone block size.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.chunk.size</name>
-    <value>16777216</value>
-    <tag>OZONE, SCM, CONTAINER, PERFORMANCE</tag>
-    <description>
-      The chunk size for reading/writing chunk operations in bytes.
-
-      The chunk size defaults to 16MB. If the value configured is more than the
-      maximum size (16MB), it will be reset to the maximum size. This maps to
-      the network packet sizes and file write operations in the client to
-      datanode protocol.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.client.address</name>
-    <value/>
-    <tag>OZONE, SCM, REQUIRED</tag>
-    <description>
-      The address of the Ozone SCM client service. This is a required setting.
-
-      It is a string in the host:port format. The port number is optional
-      and defaults to 9860.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.client.bind.host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>The hostname or IP address used by the SCM client endpoint to
-      bind.
-      This setting is used by the SCM only and never used by clients.
-
-      The setting can be useful in multi-homed setups to restrict the
-      availability of the SCM client service to a specific interface.
-
-      The default is appropriate for most clusters.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.client.port</name>
-    <value>9860</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>The port number of the Ozone SCM client service.</description>
-  </property>
-  <property>
-    <name>ozone.scm.keyvalue.container.deletion-choosing.policy</name>
-    <value>
-      org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy
-    </value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The policy used for choosing desired keyvalue containers for block deletion.
-      Datanode selects some containers to process block deletion
-      in a certain interval defined by ozone.block.deleting.service.interval.
-      The number of containers to process in each interval is defined
-      by ozone.block.deleting.container.limit.per.interval. This property is
-      used to configure the policy applied while selecting containers.
-      Two policies are supported now:
-      RandomContainerDeletionChoosingPolicy and
-      TopNOrderedContainerDeletionChoosingPolicy.
-      org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy
-      implements a simple random policy that returns a random list of
-      containers.
-      org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy
-      implements a policy that chooses the top N containers ordered by their
-      number of pending deletion blocks, in descending order.
-    </description>
-  </property>
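As a sketch, an operator preferring the random policy could override the
default in ozone-site.xml by supplying the fully qualified class name listed
above:

  <property>
    <name>ozone.scm.keyvalue.container.deletion-choosing.policy</name>
    <!-- switch from the TopN-ordered default to the random policy -->
    <value>org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy</value>
  </property>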
-  <property>
-    <name>ozone.scm.container.placement.impl</name>
-    <value>
-      
org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom
-    </value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>Placement policy class for containers.
-      Defaults to SCMContainerPlacementRandom.class
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.provision_batch_size</name>
-    <value>20</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>Pre-provision specified number of containers for block
-      allocation.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.container.size</name>
-    <value>5GB</value>
-    <tag>OZONE, PERFORMANCE, MANAGEMENT</tag>
-    <description>
-      Default container size used by Ozone.
-      There are two considerations when picking this number: the speed at
-      which a container can be replicated, determined by the network speed,
-      and the amount of metadata that each container generates. Selecting a
-      larger size creates less SCM metadata, but recovery time will be longer.
-      5GB maps to quick replication times on gigabit networks while still
-      keeping the amount of metadata manageable.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.address</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address of the Ozone SCM service used for internal
-      communication between the DataNodes and the SCM.
-
-      It is a string in the host:port format. The port number is optional
-      and defaults to 9861.
-
-      This setting is optional. If unspecified then the hostname portion
-      is picked from the ozone.scm.client.address setting and the
-      default service port of 9861 is chosen.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.bind.host</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The hostname or IP address used by the SCM service endpoint to
-      bind.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.id</name>
-    <value/>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>The path that datanodes will use to store the datanode ID.
-      If this value is not set, then datanode ID is created under the
-      metadata directory.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.datanode.port</name>
-    <value>9861</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The port number of the Ozone SCM service.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.db.cache.size.mb</name>
-    <value>128</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>SCM keeps track of the containers in the cluster. This DB
-      holds the container metadata. The default is set to a small value so
-      that unit tests run smoothly. In production, we recommend a value of
-      16GB or higher. This allows SCM to avoid disk I/O while looking up
-      container locations.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.dead.node.interval</name>
-    <value>10m</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The interval between heartbeats before a node is tagged as dead.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.handler.count.key</name>
-    <value>10</value>
-    <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
-    <description>
-      The number of RPC handler threads for each SCM service
-      endpoint.
-
-      The default is appropriate for small clusters (tens of nodes).
-
-      Set a value that is appropriate for the cluster size. Generally, HDFS
-      recommends setting the RPC handler count to 20 * log2(cluster size) with
-      an upper limit of 200. However, SCM will not have the same amount of
-      traffic as the Namenode, so a much smaller value will work well too.
-    </description>
-  </property>
-  <property>
-    <name>hdds.heartbeat.interval</name>
-    <value>30s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The heartbeat interval from a datanode to SCM. Yes, it is not 3 but 30
-      seconds, since most datanodes will be heartbeating via Ratis heartbeats.
-      If a client is not able to talk to a datanode, it will eventually notify
-      OM/SCM, so a 30 second heartbeat works. This assumes that the
-      replication strategy used is Ratis; if not, this value should be set to
-      something smaller, like 3 seconds.
-    </description>
-  </property>
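Following the note above, a cluster that does not rely on Ratis replication
might shorten the interval; a minimal ozone-site.xml sketch:

  <property>
    <name>hdds.heartbeat.interval</name>
    <!-- shorter heartbeat when Ratis heartbeats are not used -->
    <value>3s</value>
  </property>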
-  <property>
-    <name>ozone.scm.heartbeat.log.warn.interval.count</name>
-    <value>10</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Defines how frequently we log a missed heartbeat to SCM. In the default
-      case, we write a warning message once for every ten consecutive
-      heartbeats to SCM that are missed. This reduces clutter in the datanode
-      log, but the trade-off is that the log carries fewer of these statements.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.rpc-timeout</name>
-    <value>1000</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Timeout value for the RPC from Datanode to SCM in milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.heartbeat.thread.interval</name>
-    <value>3s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      When a heartbeat from a datanode arrives at SCM, it is queued for
-      processing with the timestamp of when it arrived. There is a heartbeat
-      processing thread inside SCM that runs at a specified interval. This
-      value controls how frequently that thread runs.
-
-      There are some assumptions built into SCM, such as this value allowing
-      the heartbeat processing thread to run at least three times more
-      frequently than heartbeats and at least five times more frequently than
-      stale node detection. If you specify an invalid value, SCM will
-      gracefully refuse to run. For more info, look at the node manager tests
-      in SCM.
-
-      In short, you don't need to change this.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.http-address</name>
-    <value>0.0.0.0:9876</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the SCM web ui will listen on.
-
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The actual address the SCM web server will bind to. If this
-      optional address is set, it overrides only the hostname portion of
-      ozone.scm.http-address.
-    </description>
-  </property>
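On a multi-homed SCM host, the two settings above are typically combined; a
hypothetical ozone-site.xml sketch (the hostname is made up):

  <property>
    <name>ozone.scm.http-address</name>
    <!-- hostname advertised to users; hypothetical -->
    <value>scm1.example.com:9876</value>
  </property>
  <property>
    <name>ozone.scm.http-bind-host</name>
    <!-- bind the web server on all interfaces -->
    <value>0.0.0.0</value>
  </property>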
-  <property>
-    <name>ozone.scm.http.enabled</name>
-    <value>true</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      Property to enable or disable SCM web ui.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.https-address</name>
-    <value>0.0.0.0:9877</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The address and the base port where the SCM web UI will listen
-      on using HTTPS.
-
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.https-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The actual address the SCM web server will bind to using HTTPS.
-      If this optional address is set, it overrides only the hostname portion
-      of ozone.scm.https-address.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.keytab.file</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>
-      The keytab file for Kerberos authentication in SCM.
-    </description>
-  </property>
-  <property>
-    <name>ozone.scm.names</name>
-    <value/>
-    <tag>OZONE, REQUIRED</tag>
-    <description>
-      The value of this property is a comma-separated list of entries of the
-      form DNS | DNS:PORT | IP Address | IP:PORT, e.g. scm1,
-      scm2:8020, 7.7.7.7:7777.
-      This property allows datanodes to discover where SCM is, so that
-      datanodes can send heartbeats to SCM.
-    </description>
-  </property>
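For example, the comma-separated form described above could be written as
follows (a sketch; the hostnames are hypothetical):

  <property>
    <name>ozone.scm.names</name>
    <!-- hypothetical SCM hosts; the port part is optional -->
    <value>scm1.example.com,scm2.example.com:8020,7.7.7.7:7777</value>
  </property>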
-  <property>
-    <name>ozone.scm.stale.node.interval</name>
-    <value>90s</value>
-    <tag>OZONE, MANAGEMENT</tag>
-    <description>
-      The interval for stale node flagging. Please
-      see ozone.scm.heartbeat.thread.interval before changing this value.
-    </description>
-  </property>
-  <property>
-    <name>ozone.trace.enabled</name>
-    <value>false</value>
-    <tag>OZONE, DEBUG</tag>
-    <description>
-      Setting this flag to true dumps the HTTP request/response in
-      the logs. Very useful when debugging the REST protocol.
-    </description>
-  </property>
-  <property>
-    <name>ozone.web.authentication.kerberos.principal</name>
-    <value/>
-    <tag>OZONE, SECURITY</tag>
-    <description>
-      The server principal used by the SCM and OM for web UI SPNEGO
-      authentication when Kerberos security is enabled. This is typically set
-      to HTTP/[email protected]. The SPNEGO server principal begins with the
-      prefix HTTP/ by convention.
-
-      If the value is '*', the web server will attempt to login with
-      every principal specified in the keytab file.
-    </description>
-  </property>
-
-  <!--Client Settings-->
-  <property>
-    <name>scm.container.client.idle.threshold</name>
-    <value>10s</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      In the standalone pipelines, the SCM clients use netty to
-      communicate with the container. The client also uses connection pooling
-      to reduce client side overheads. This allows a connection to stay idle
-      for a while before the connection is closed.
-    </description>
-  </property>
-  <property>
-    <name>scm.container.client.max.size</name>
-    <value>256</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Controls the maximum number of connections that we cache via client
-      connection pooling. If the number of connections exceeds this count,
-      then the oldest idle connection is evicted.
-    </description>
-  </property>
-
-  <property>
-    <name>scm.container.client.max.outstanding.requests</name>
-    <value>100</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Controls the maximum number of outstanding async requests that can be
-      handled by the Standalone as well as Ratis client.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.scm.container.creation.lease.timeout</name>
-    <value>60s</value>
-    <tag>OZONE, SCM</tag>
-    <description>
-      Container creation timeout to be used by SCM. When a BEGIN_CREATE
-      event happens, the container is moved from the ALLOCATED to the
-      CREATING state, and SCM waits for the configured amount of time for a
-      COMPLETE_CREATE event. If it does not receive one, it moves the
-      container to DELETING.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.key.preallocation.maxsize</name>
-    <value>134217728</value>
-    <tag>OZONE, OM, PERFORMANCE</tag>
-    <description>
-      When a new key write request is sent to OM, if a size is requested, at
-      most 128MB is allocated at request time. If the client needs more space
-      for the write, separate block allocation requests will be made.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.client.list.cache</name>
-    <value>1000</value>
-    <tag>OZONE, PERFORMANCE</tag>
-    <description>
-      Configures the cache size used for client list calls.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.replication</name>
-    <value>3</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      Default replication value. The actual number of replicas can be
-      specified when writing the key. The default is used if replication
-      is not specified. Supported values: 1 and 3.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.replication.type</name>
-    <value>RATIS</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      Default replication type to be used while writing key into ozone. The
-      value can be specified when writing the key, default is used when
-      nothing is specified. Supported values: RATIS, STAND_ALONE and CHAINED.
-    </description>
-  </property>
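For a single-datanode developer setup, the two client-side defaults above are
often lowered together; a hypothetical ozone-site.xml sketch:

  <property>
    <name>ozone.replication</name>
    <!-- a single replica is suitable only for test clusters -->
    <value>1</value>
  </property>
  <property>
    <name>ozone.replication.type</name>
    <!-- STAND_ALONE writes directly to the datanode without Ratis -->
    <value>STAND_ALONE</value>
  </property>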
-  <property>
-    <name>hdds.container.close.threshold</name>
-    <value>0.9f</value>
-    <tag>OZONE, DATANODE</tag>
-    <description>
-      This determines the threshold to be used for closing a container.
-      When the container used percentage reaches this threshold,
-      the container will be closed. Value should be a positive, non-zero
-      percentage in float notation (X.Yf), with 1.0f meaning 100%.
-    </description>
-  </property>
-  <property>
-    <name>ozone.rest.client.http.connection.max</name>
-    <value>100</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      This defines the overall connection limit for the connection pool used in
-      RestClient.
-    </description>
-  </property>
-  <property>
-    <name>ozone.rest.client.http.connection.per-route.max</name>
-    <value>20</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>
-      This defines the connection limit per one HTTP route/host. Total max
-      connection is limited by ozone.rest.client.http.connection.max property.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.open.key.cleanup.service.interval.seconds</name>
-    <value>86400</value>
-    <tag>OZONE, OM, PERFORMANCE</tag>
-    <description>
-      A background job periodically checks open key entries and deletes the
-      expired ones. This entry controls the interval of this cleanup check.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.open.key.expire.threshold</name>
-    <value>86400</value>
-    <tag>OZONE, OM, PERFORMANCE</tag>
-    <description>
-      Controls how long an open key operation is considered active.
-      Specifically, if a key has been open longer than the value of this
-      config entry, that open key is considered expired (e.g. due to a client
-      crash). Defaults to 24 hours.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.tags.custom</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY</value>
-  </property>
-
-  <property>
-    <name>ozone.tags.system</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE,S3GATEWAY</value>
-  </property>
-
-
-  <property>
-    <name>hdds.rest.rest-csrf.enabled</name>
-    <value>false</value>
-    <description>
-      If true, then enables Object Store REST server protection against
-      cross-site request forgery (CSRF).
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.rest.http-address</name>
-    <value>0.0.0.0:9880</value>
-    <description>The http address of Object Store REST server inside the
-      datanode.</description>
-  </property>
-
-
-  <property>
-    <name>hdds.rest.netty.high.watermark</name>
-    <value>65535</value>
-    <description>
-      High watermark configuration to Netty for Object Store REST server.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.rest.netty.low.watermark</name>
-    <value>32768</value>
-    <description>
-      Low watermark configuration to Netty for Object Store REST server.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.datanode.plugins</name>
-    <value>org.apache.hadoop.ozone.web.OzoneHddsDatanodeService</value>
-    <description>
-      Comma-separated list of HDDS datanode plug-ins to be activated when
-      HDDS service starts as part of datanode.
-    </description>
-  </property>
-  <property>
-    <name>hdds.datanode.storage.utilization.warning.threshold</name>
-    <value>0.75</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>
-      If a datanode's overall storage utilization exceeds this value, a
-      warning will be logged while processing the nodeReport in SCM.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.datanode.storage.utilization.critical.threshold</name>
-    <value>0.95</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>
-      If a datanode's overall storage utilization exceeds this value, the
-      datanode will be marked out of space.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.write.lock.reporting.threshold.ms</name>
-    <value>5000</value>
-    <tag>OZONE, DATANODE, MANAGEMENT</tag>
-    <description>
-      When a write lock is held for a long time, this will be logged as the
-      lock is released. This sets how long the lock must be held for logging
-      to occur.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.lock.suppress.warning.interval.ms</name>
-    <value>10000</value>
-    <tag>OZONE, DATANODE, MANAGEMENT</tag>
-    <description>
-      Instrumentation reporting long critical sections will suppress
-      consecutive warnings within this interval.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.command.status.report.interval</name>
-    <value>30s</value>
-    <tag>OZONE, DATANODE, MANAGEMENT</tag>
-    <description>Time interval at which the datanode sends the status of
-      commands executed since the last report. The unit can be defined with a
-      postfix (ns,ms,s,m,h,d)</description>
-  </property>
-  <property>
-    <name>ozone.scm.pipeline.creation.lease.timeout</name>
-    <value>60s</value>
-    <tag>OZONE, SCM, PIPELINE</tag>
-    <description>
-      Pipeline creation timeout to be used by SCM. When a BEGIN_CREATE
-      event happens, the pipeline is moved from the ALLOCATED to the
-      CREATING state, and SCM waits for the configured amount of time for a
-      COMPLETE_CREATE event. If it does not receive one, it moves the
-      pipeline to DELETING.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.scm.chillmode.threshold.pct</name>
-    <value>0.99</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description> % of containers which should have at least one
-      reported replica before SCM comes out of chill mode.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.scm.chillmode.enabled</name>
-    <value>true</value>
-    <tag>HDDS,SCM,OPERATION</tag>
-    <description>Boolean value to enable or disable SCM chill mode.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.container.action.max.limit</name>
-    <value>20</value>
-    <tag>DATANODE</tag>
-    <description>
-      Maximum number of Container Actions sent by the datanode to SCM in a
-      single heartbeat.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.pipeline.action.max.limit</name>
-    <value>20</value>
-    <tag>DATANODE</tag>
-    <description>
-      Maximum number of Pipeline Actions sent by the datanode to SCM in a
-      single heartbeat.
-    </description>
-  </property>
-  <property>
-    <name>hdds.scm.watcher.timeout</name>
-    <value>10m</value>
-    <tag>OZONE, SCM, MANAGEMENT</tag>
-    <description>
-      Timeout for the watchers of the HDDS SCM CommandWatchers. After this
-      duration the Copy/Delete container commands will be sent again to the
-      datanode unless the datanode confirms the completion.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.db.profile</name>
-    <value>SSD</value>
-    <tag>OZONE, OM, PERFORMANCE, REQUIRED</tag>
-    <description>This property allows the user to pick a configuration
-    that tunes the RocksDB settings for the hardware it is running
-    on. Right now, we have SSD and DISK as profile options.</description>
-  </property>
-
-  <property>
-    <name>hdds.datanode.replication.work.dir</name>
-    <tag>DATANODE</tag>
-    <description>Temporary directory which is used during container
-      replication between datanodes. Should have enough space to store
-      multiple containers (in compressed format), but doesn't require fast
-      I/O access such as SSD.
-    </description>
-  </property>
-
-  <property>
-    <name>hdds.lock.max.concurrency</name>
-    <value>100</value>
-    <tag>HDDS</tag>
-    <description>Locks in HDDS/Ozone use an object pool to maintain the
-      active locks in the system; this property defines the maximum number of
-      locks that will be maintained in the pool.
-    </description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.authentication.kerberos.principal</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The server principal used by the Ozone S3Gateway server.
-      This is typically set to
-      HTTP/[email protected]. The SPNEGO server principal begins with the prefix
-      HTTP/ by convention.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.domain.name</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>List of Ozone S3Gateway domain names. If multiple
-      domain names are to be provided, they should be "," separated.
-      This parameter is only required when the virtual host style pattern is
-      followed.</description>
-  </property>
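For virtual host style addressing, the domain list above might be set like
this (a hypothetical sketch):

  <property>
    <name>ozone.s3g.domain.name</name>
    <!-- hypothetical domains served by the S3 gateway -->
    <value>s3g.example.com,s3g.internal.example.com</value>
  </property>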
-
-  <property>
-    <name>ozone.s3g.http-address</name>
-    <value>0.0.0.0:9878</value>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The address and the base port where the Ozone S3Gateway
-      Server will
-      listen on.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.http-bind-host</name>
-    <value>0.0.0.0</value>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The actual address the HTTP server will bind to. If this
-      optional address is set, it overrides only the hostname portion of
-      ozone.s3g.http-address.
-      This is useful for making the Ozone S3Gateway HTTP server listen on all
-      interfaces by setting it to 0.0.0.0.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.http.enabled</name>
-    <value>true</value>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The boolean which enables the Ozone S3Gateway
-      server.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.https-address</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>Ozone S3Gateway HTTPS server address and
-      port.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.https-bind-host</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The actual address the HTTPS server will bind to. If this
-      optional address is set, it overrides only the hostname portion of
-      ozone.s3g.https-address.
-      This is useful for making the Ozone S3Gateway HTTPS server listen on all
-      interfaces by setting it to 0.0.0.0.</description>
-  </property>
-
-  <property>
-    <name>ozone.s3g.keytab.file</name>
-    <value/>
-    <tag>OZONE, S3GATEWAY</tag>
-    <description>The keytab file used by the S3Gateway server to login as its
-      service principal. </description>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
deleted file mode 100644
index 7966941..0000000
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-/**
- Test cases for SCM client classes.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c392da8/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
deleted file mode 100644
index 03c45c5..0000000
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests ACL storage and retrieval in the Ozone store.
- */
-public class TestOzoneAcls {
-
-  @Test
-  public void testAclParse() {
-    HashMap<String, Boolean> testMatrix;
-    testMatrix = new HashMap<>();
-
-    testMatrix.put("user:bilbo:r", Boolean.TRUE);
-    testMatrix.put("user:bilbo:w", Boolean.TRUE);
-    testMatrix.put("user:bilbo:rw", Boolean.TRUE);
-    testMatrix.put("user:bilbo:wr", Boolean.TRUE);
-    testMatrix.put("    user:bilbo:wr   ", Boolean.TRUE);
-
-
-    // ACLs make no judgement on the quality of
-    // user names. It is for the userAuth interface
-    // to determine if a user name is really a name.
-    testMatrix.put(" user:*:rw", Boolean.TRUE);
-    testMatrix.put(" user:~!:rw", Boolean.TRUE);
-
-
-    testMatrix.put("", Boolean.FALSE);
-    testMatrix.put(null, Boolean.FALSE);
-    testMatrix.put(" user:bilbo:", Boolean.FALSE);
-    testMatrix.put(" user:bilbo:rx", Boolean.FALSE);
-    testMatrix.put(" user:bilbo:mk", Boolean.FALSE);
-    testMatrix.put(" user::rw", Boolean.FALSE);
-    testMatrix.put("user11:bilbo:rw", Boolean.FALSE);
-    testMatrix.put(" user:::rw", Boolean.FALSE);
-
-    testMatrix.put(" group:hobbit:r", Boolean.TRUE);
-    testMatrix.put(" group:hobbit:w", Boolean.TRUE);
-    testMatrix.put(" group:hobbit:rw", Boolean.TRUE);
-    testMatrix.put(" group:hobbit:wr", Boolean.TRUE);
-    testMatrix.put(" group:*:rw", Boolean.TRUE);
-    testMatrix.put(" group:~!:rw", Boolean.TRUE);
-
-    testMatrix.put(" group:hobbit:", Boolean.FALSE);
-    testMatrix.put(" group:hobbit:rx", Boolean.FALSE);
-    testMatrix.put(" group:hobbit:mk", Boolean.FALSE);
-    testMatrix.put(" group::", Boolean.FALSE);
-    testMatrix.put(" group::rw", Boolean.FALSE);
-    testMatrix.put(" group22:hobbit:", Boolean.FALSE);
-    testMatrix.put(" group:::rw", Boolean.FALSE);
-
-    testMatrix.put("JUNK group:hobbit:r", Boolean.FALSE);
-    testMatrix.put("JUNK group:hobbit:w", Boolean.FALSE);
-    testMatrix.put("JUNK group:hobbit:rw", Boolean.FALSE);
-    testMatrix.put("JUNK group:hobbit:wr", Boolean.FALSE);
-    testMatrix.put("JUNK group:*:rw", Boolean.FALSE);
-    testMatrix.put("JUNK group:~!:rw", Boolean.FALSE);
-
-    testMatrix.put(" world::r", Boolean.TRUE);
-    testMatrix.put(" world::w", Boolean.TRUE);
-    testMatrix.put(" world::rw", Boolean.TRUE);
-    testMatrix.put(" world::wr", Boolean.TRUE);
-
-    testMatrix.put(" world:bilbo:w", Boolean.FALSE);
-    testMatrix.put(" world:bilbo:rw", Boolean.FALSE);
-
-    Set<String> keys = testMatrix.keySet();
-    for (String key : keys) {
-      if (testMatrix.get(key)) {
-        OzoneAcl.parseAcl(key);
-      } else {
-        try {
-          OzoneAcl.parseAcl(key);
-          // should never get here since parseAcl will throw
-          fail("An exception was expected but did not happen.");
-        } catch (IllegalArgumentException e) {
-          // nothing to do
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testAclValues() {
-    OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw");
-    assertEquals(acl.getName(), "bilbo");
-    assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType());
-
-    acl = OzoneAcl.parseAcl("user:bilbo:wr");
-    assertEquals("bilbo", acl.getName());
-    assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType());
-
-    acl = OzoneAcl.parseAcl("user:bilbo:r");
-    assertEquals("bilbo", acl.getName());
-    assertEquals(OzoneAcl.OzoneACLRights.READ, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType());
-
-    acl = OzoneAcl.parseAcl("user:bilbo:w");
-    assertEquals("bilbo", acl.getName());
-    assertEquals(OzoneAcl.OzoneACLRights.WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.USER, acl.getType());
-
-    acl = OzoneAcl.parseAcl("group:hobbit:wr");
-    assertEquals(acl.getName(), "hobbit");
-    assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.GROUP, acl.getType());
-
-    acl = OzoneAcl.parseAcl("world::wr");
-    assertEquals(acl.getName(), "");
-    assertEquals(OzoneAcl.OzoneACLRights.READ_WRITE, acl.getRights());
-    assertEquals(OzoneAcl.OzoneACLType.WORLD, acl.getType());
-  }
-
-}

