http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 0000000,7d32568..fb10e9c
mode 000000,100644..100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@@ -1,0 -1,873 +1,880 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ /**
+  * These .proto interfaces are private and stable.
+  * Please see http://wiki.apache.org/hadoop/Compatibility
+  * for what changes are allowed for a *stable* .proto interface.
+  */
+ 
+ option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+ option java_outer_classname = "ClientNamenodeProtocolProtos";
+ option java_generic_services = true;
+ option java_generate_equals_and_hash = true;
+ package hadoop.hdfs;
+ 
+ import "Security.proto";
+ import "hdfs.proto";
+ import "acl.proto";
+ import "xattr.proto";
+ import "encryption.proto";
+ import "inotify.proto";
++import "erasurecoding.proto";
+ 
+ /**
+  * The ClientNamenodeProtocol Service defines the interface between a client
+  * (as running inside an MR Task) and the Namenode.
+  * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc
+  * for each of the methods.
+  * The exceptions declared in the above class also apply to this protocol.
+  * Exceptions are unwrapped and thrown by the PB libraries.
+  */
+ 
+ message GetBlockLocationsRequestProto {
+   required string src = 1;     // file name
+   required uint64 offset = 2;  // range start offset
+   required uint64 length = 3;  // range length
+ }
+ 
+ message GetBlockLocationsResponseProto {
+   optional LocatedBlocksProto locations = 1;
+ }
+ 
+ message GetServerDefaultsRequestProto { // No parameters
+ }
+ 
+ message GetServerDefaultsResponseProto {
+   required FsServerDefaultsProto serverDefaults = 1;
+ }
+ 
+ enum CreateFlagProto {
+   CREATE = 0x01;    // Create a file
+   OVERWRITE = 0x02; // Truncate/overwrite a file. Same as POSIX O_TRUNC
+   APPEND = 0x04;    // Append to a file
+   LAZY_PERSIST = 0x10; // File with reduced durability guarantees.
+   NEW_BLOCK = 0x20; // Write data to a new block when appending
+ }
+ 
+ message CreateRequestProto {
+   required string src = 1;
+   required FsPermissionProto masked = 2;
+   required string clientName = 3;
+   required uint32 createFlag = 4;  // bits set using CreateFlag
+   required bool createParent = 5;
+   required uint32 replication = 6; // Short: Only 16 bits used
+   required uint64 blockSize = 7;
+   repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8;
+ }
+ 
+ message CreateResponseProto {
+   optional HdfsFileStatusProto fs = 1;
+ }
+ 
+ message AppendRequestProto {
+   required string src = 1;
+   required string clientName = 2;
+   optional uint32 flag = 3; // bits set using CreateFlag
+ }
+ 
+ message AppendResponseProto {
+   optional LocatedBlockProto block = 1;
+   optional HdfsFileStatusProto stat = 2;
+ }
+ 
+ message SetReplicationRequestProto {
+   required string src = 1;
+   required uint32 replication = 2; // Short: Only 16 bits used
+ }
+ 
+ message SetReplicationResponseProto {
+   required bool result = 1;
+ }
+ 
+ message SetStoragePolicyRequestProto {
+   required string src = 1;
+   required string policyName = 2;
+ }
+ 
+ message SetStoragePolicyResponseProto { // void response
+ }
+ 
+ message GetStoragePolicyRequestProto {
+   required string path = 1;
+ }
+ 
+ message GetStoragePolicyResponseProto {
+   required BlockStoragePolicyProto storagePolicy = 1;
+ }
+ 
+ message GetStoragePoliciesRequestProto { // void request
+ }
+ 
+ message GetStoragePoliciesResponseProto {
+   repeated BlockStoragePolicyProto policies = 1;
+ }
+ 
+ message SetPermissionRequestProto {
+   required string src = 1;
+   required FsPermissionProto permission = 2;
+ }
+ 
+ message SetPermissionResponseProto { // void response
+ }
+ 
+ message SetOwnerRequestProto {
+   required string src = 1;
+   optional string username = 2;
+   optional string groupname = 3;
+ }
+ 
+ message SetOwnerResponseProto { // void response
+ }
+ 
+ message AbandonBlockRequestProto {
+   required ExtendedBlockProto b = 1;
+   required string src = 2;
+   required string holder = 3;
+   optional uint64 fileId = 4 [default = 0];  // default to GRANDFATHER_INODE_ID
+ }
+ 
+ message AbandonBlockResponseProto { // void response
+ }
+ 
+ message AddBlockRequestProto {
+   required string src = 1;
+   required string clientName = 2;
+   optional ExtendedBlockProto previous = 3;
+   repeated DatanodeInfoProto excludeNodes = 4;
+   optional uint64 fileId = 5 [default = 0];  // default as a bogus id
+   repeated string favoredNodes = 6; //the set of datanodes to use for the block
+ }
+ 
+ message AddBlockResponseProto {
+   required LocatedBlockProto block = 1;
+ }
+ 
+ message GetAdditionalDatanodeRequestProto {
+   required string src = 1;
+   required ExtendedBlockProto blk = 2;
+   repeated DatanodeInfoProto existings = 3;
+   repeated DatanodeInfoProto excludes = 4;
+   required uint32 numAdditionalNodes = 5;
+   required string clientName = 6;
+   repeated string existingStorageUuids = 7;
+   optional uint64 fileId = 8 [default = 0];  // default to GRANDFATHER_INODE_ID
+ }
+ 
+ message GetAdditionalDatanodeResponseProto {
+   required LocatedBlockProto block = 1;
+ }
+ 
+ message CompleteRequestProto {
+   required string src = 1;
+   required string clientName = 2;
+   optional ExtendedBlockProto last = 3;
+   optional uint64 fileId = 4 [default = 0];  // default to GRANDFATHER_INODE_ID
+ }
+ 
+ message CompleteResponseProto {
+   required bool result = 1;
+ }
+ 
+ message ReportBadBlocksRequestProto {
+   repeated LocatedBlockProto blocks = 1;
+ }
+ 
+ message ReportBadBlocksResponseProto { // void response
+ }
+ 
+ message ConcatRequestProto {
+   required string trg = 1;
+   repeated string srcs = 2;
+ }
+ 
+ message ConcatResponseProto { // void response
+ }
+ 
+ message TruncateRequestProto {
+   required string src = 1;
+   required uint64 newLength = 2;
+   required string clientName = 3;
+ }
+ 
+ message TruncateResponseProto {
+   required bool result = 1;
+ }
+ 
+ message RenameRequestProto {
+   required string src = 1;
+   required string dst = 2;
+ }
+ 
+ message RenameResponseProto {
+   required bool result = 1;
+ }
+ 
+ 
+ message Rename2RequestProto {
+   required string src = 1;
+   required string dst = 2;
+   required bool overwriteDest = 3;
+ }
+ 
+ message Rename2ResponseProto { // void response
+ }
+ 
+ message DeleteRequestProto {
+   required string src = 1;
+   required bool recursive = 2;
+ }
+ 
+ message DeleteResponseProto {
+     required bool result = 1;
+ }
+ 
+ message MkdirsRequestProto {
+   required string src = 1;
+   required FsPermissionProto masked = 2;
+   required bool createParent = 3;
+ }
+ message MkdirsResponseProto {
+     required bool result = 1;
+ }
+ 
+ message GetListingRequestProto {
+   required string src = 1;
+   required bytes startAfter = 2;
+   required bool needLocation = 3;
+ }
+ message GetListingResponseProto {
+   optional DirectoryListingProto dirList = 1;
+ }
+ 
+ message GetSnapshottableDirListingRequestProto { // no input parameters
+ }
+ message GetSnapshottableDirListingResponseProto {
+   optional SnapshottableDirectoryListingProto snapshottableDirList = 1;
+ }
+ 
+ message GetSnapshotDiffReportRequestProto {
+   required string snapshotRoot = 1;
+   required string fromSnapshot = 2;
+   required string toSnapshot = 3;
+ }
+ message GetSnapshotDiffReportResponseProto {
+   required SnapshotDiffReportProto diffReport = 1;
+ }
+ 
+ message RenewLeaseRequestProto {
+   required string clientName = 1;
+ }
+ 
+ message RenewLeaseResponseProto { //void response
+ }
+ 
+ message RecoverLeaseRequestProto {
+   required string src = 1;
+   required string clientName = 2;
+ }
+ message RecoverLeaseResponseProto {
+   required bool result = 1;
+ }
+ 
+ message GetFsStatusRequestProto { // no input parameters
+ }
+ 
+ message GetFsStatsResponseProto {
+   required uint64 capacity = 1;
+   required uint64 used = 2;
+   required uint64 remaining = 3;
+   required uint64 under_replicated = 4;
+   required uint64 corrupt_blocks = 5;
+   required uint64 missing_blocks = 6;
+   optional uint64 missing_repl_one_blocks = 7;
+ }
+ 
+ enum DatanodeReportTypeProto {  // type of the datanode report
+   ALL = 1;
+   LIVE = 2;
+   DEAD = 3;
+   DECOMMISSIONING = 4;
+ }
+ 
+ message GetDatanodeReportRequestProto {
+   required DatanodeReportTypeProto type = 1;
+ }
+ 
+ message GetDatanodeReportResponseProto {
+   repeated DatanodeInfoProto di = 1;
+ }
+ 
+ message GetDatanodeStorageReportRequestProto {
+   required DatanodeReportTypeProto type = 1;
+ }
+ 
+ message DatanodeStorageReportProto {
+   required DatanodeInfoProto datanodeInfo = 1;
+   repeated StorageReportProto storageReports = 2;
+ }
+ 
+ message GetDatanodeStorageReportResponseProto {
+   repeated DatanodeStorageReportProto datanodeStorageReports = 1;
+ }
+ 
+ message GetPreferredBlockSizeRequestProto {
+   required string filename = 1;
+ }
+ 
+ message GetPreferredBlockSizeResponseProto {
+   required uint64 bsize = 1;
+ }
+ 
+ enum SafeModeActionProto {
+   SAFEMODE_LEAVE = 1;
+   SAFEMODE_ENTER = 2;
+   SAFEMODE_GET = 3;
+ }
+ 
+ message SetSafeModeRequestProto {
+   required SafeModeActionProto action = 1;
+   optional bool checked = 2 [default = false];
+ }
+ 
+ message SetSafeModeResponseProto {
+   required bool result = 1;
+ }
+ 
+ message SaveNamespaceRequestProto {
+   optional uint64 timeWindow = 1 [default = 0];
+   optional uint64 txGap = 2 [default = 0];
+ }
+ 
+ message SaveNamespaceResponseProto { // void response
+   optional bool saved = 1 [default = true];
+ }
+ 
+ message RollEditsRequestProto { // no parameters
+ }
+ 
+ message RollEditsResponseProto { // response
+   required uint64 newSegmentTxId = 1;
+ }
+ 
+ message RestoreFailedStorageRequestProto {
+   required string arg = 1;
+ }
+ 
+ message RestoreFailedStorageResponseProto {
+     required bool result = 1;
+ }
+ 
+ message RefreshNodesRequestProto { // no parameters
+ }
+ 
+ message RefreshNodesResponseProto { // void response
+ }
+ 
+ message FinalizeUpgradeRequestProto { // no parameters
+ }
+ 
+ message FinalizeUpgradeResponseProto { // void response
+ }
+ 
+ enum RollingUpgradeActionProto {
+   QUERY = 1;
+   START = 2;
+   FINALIZE = 3;
+ }
+ 
+ message RollingUpgradeRequestProto {
+   required RollingUpgradeActionProto action = 1;
+ }
+ 
+ message RollingUpgradeInfoProto {
+   required RollingUpgradeStatusProto status = 1;
+   required uint64 startTime = 2;
+   required uint64 finalizeTime = 3;
+   required bool createdRollbackImages = 4;
+ }
+ 
+ message RollingUpgradeResponseProto {
+   optional RollingUpgradeInfoProto rollingUpgradeInfo= 1;
+ }
+ 
+ message ListCorruptFileBlocksRequestProto {
+   required string path = 1;
+   optional string cookie = 2;
+ }
+ 
+ message ListCorruptFileBlocksResponseProto {
+   required CorruptFileBlocksProto corrupt = 1;
+ }
+ 
+ message MetaSaveRequestProto {
+   required string filename = 1;
+ }
+ 
+ message MetaSaveResponseProto { // void response
+ }
+ 
+ message GetFileInfoRequestProto {
+   required string src = 1;
+ }
+ 
+ message GetFileInfoResponseProto {
+   optional HdfsFileStatusProto fs = 1;
+ }
+ 
+ message IsFileClosedRequestProto {
+   required string src = 1;
+ }
+ 
+ message IsFileClosedResponseProto {
+   required bool result = 1;
+ }
+ 
+ message CacheDirectiveInfoProto {
+   optional int64 id = 1;
+   optional string path = 2;
+   optional uint32 replication = 3;
+   optional string pool = 4;
+   optional CacheDirectiveInfoExpirationProto expiration = 5;
+ }
+ 
+ message CacheDirectiveInfoExpirationProto {
+   required int64 millis = 1;
+   required bool isRelative = 2;
+ }
+ 
+ message CacheDirectiveStatsProto {
+   required int64 bytesNeeded = 1;
+   required int64 bytesCached = 2;
+   required int64 filesNeeded = 3;
+   required int64 filesCached = 4;
+   required bool hasExpired = 5;
+ }
+ 
+ enum CacheFlagProto {
+   FORCE = 0x01;    // Ignore pool resource limits
+ }
+ 
+ message AddCacheDirectiveRequestProto {
+   required CacheDirectiveInfoProto info = 1;
+   optional uint32 cacheFlags = 2;  // bits set using CacheFlag
+ }
+ 
+ message AddCacheDirectiveResponseProto {
+   required int64 id = 1;
+ }
+ 
+ message ModifyCacheDirectiveRequestProto {
+   required CacheDirectiveInfoProto info = 1;
+   optional uint32 cacheFlags = 2;  // bits set using CacheFlag
+ }
+ 
+ message ModifyCacheDirectiveResponseProto {
+ }
+ 
+ message RemoveCacheDirectiveRequestProto {
+   required int64 id = 1;
+ }
+ 
+ message RemoveCacheDirectiveResponseProto {
+ }
+ 
+ message ListCacheDirectivesRequestProto {
+   required int64 prevId = 1;
+   required CacheDirectiveInfoProto filter = 2;
+ }
+ 
+ message CacheDirectiveEntryProto {
+   required CacheDirectiveInfoProto info = 1;
+   required CacheDirectiveStatsProto stats = 2;
+ }
+ 
+ message ListCacheDirectivesResponseProto {
+   repeated CacheDirectiveEntryProto elements = 1;
+   required bool hasMore = 2;
+ }
+ 
+ message CachePoolInfoProto {
+   optional string poolName = 1;
+   optional string ownerName = 2;
+   optional string groupName = 3;
+   optional int32 mode = 4;
+   optional int64 limit = 5;
+   optional int64 maxRelativeExpiry = 6;
+ }
+ 
+ message CachePoolStatsProto {
+   required int64 bytesNeeded = 1;
+   required int64 bytesCached = 2;
+   required int64 bytesOverlimit = 3;
+   required int64 filesNeeded = 4;
+   required int64 filesCached = 5;
+ }
+ 
+ message AddCachePoolRequestProto {
+   required CachePoolInfoProto info = 1;
+ }
+ 
+ message AddCachePoolResponseProto { // void response
+ }
+ 
+ message ModifyCachePoolRequestProto {
+   required CachePoolInfoProto info = 1;
+ }
+ 
+ message ModifyCachePoolResponseProto { // void response
+ }
+ 
+ message RemoveCachePoolRequestProto {
+   required string poolName = 1;
+ }
+ 
+ message RemoveCachePoolResponseProto { // void response
+ }
+ 
+ message ListCachePoolsRequestProto {
+   required string prevPoolName = 1;
+ }
+ 
+ message ListCachePoolsResponseProto {
+   repeated CachePoolEntryProto entries = 1;
+   required bool hasMore = 2;
+ }
+ 
+ message CachePoolEntryProto {
+   required CachePoolInfoProto info = 1;
+   required CachePoolStatsProto stats = 2;
+ }
+ 
+ message GetFileLinkInfoRequestProto {
+   required string src = 1;
+ }
+ 
+ message GetFileLinkInfoResponseProto {
+   optional HdfsFileStatusProto fs = 1;
+ }
+ 
+ message GetContentSummaryRequestProto {
+   required string path = 1;
+ }
+ 
+ message GetContentSummaryResponseProto {
+   required ContentSummaryProto summary = 1;
+ }
+ 
+ message SetQuotaRequestProto {
+   required string path = 1;
+   required uint64 namespaceQuota = 2;
+   required uint64 storagespaceQuota = 3;
+   optional StorageTypeProto storageType = 4;
+ }
+ 
+ message SetQuotaResponseProto { // void response
+ }
+ 
+ message FsyncRequestProto {
+   required string src = 1;
+   required string client = 2;
+   optional sint64 lastBlockLength = 3 [default = -1];
+   optional uint64 fileId = 4 [default = 0];  // default to GRANDFATHER_INODE_ID
+ }
+ 
+ message FsyncResponseProto { // void response
+ }
+ 
+ message SetTimesRequestProto {
+   required string src = 1;
+   required uint64 mtime = 2;
+   required uint64 atime = 3;
+ }
+ 
+ message SetTimesResponseProto { // void response
+ }
+ 
+ message CreateSymlinkRequestProto {
+   required string target = 1;
+   required string link = 2;
+   required FsPermissionProto dirPerm = 3;
+   required bool createParent = 4;
+ }
+ 
+ message CreateSymlinkResponseProto { // void response
+ }
+ 
+ message GetLinkTargetRequestProto {
+   required string path = 1;
+ }
+ message GetLinkTargetResponseProto {
+   optional string targetPath = 1;
+ }
+ 
+ message UpdateBlockForPipelineRequestProto {
+   required ExtendedBlockProto block = 1;
+   required string clientName = 2;
+ }
+ 
+ message UpdateBlockForPipelineResponseProto {
+   required LocatedBlockProto block = 1;
+ }
+ 
+ message UpdatePipelineRequestProto {
+   required string clientName = 1;
+   required ExtendedBlockProto oldBlock = 2;
+   required ExtendedBlockProto newBlock = 3;
+   repeated DatanodeIDProto newNodes = 4;
+   repeated string storageIDs = 5;
+ }
+ 
+ message UpdatePipelineResponseProto { // void response
+ }
+ 
+ message SetBalancerBandwidthRequestProto {
+   required int64 bandwidth = 1;
+ }
+ 
+ message SetBalancerBandwidthResponseProto { // void response
+ }
+ 
+ message GetDataEncryptionKeyRequestProto { // no parameters
+ }
+ 
+ message GetDataEncryptionKeyResponseProto {
+   optional DataEncryptionKeyProto dataEncryptionKey = 1;
+ }
+ 
+ message CreateSnapshotRequestProto {
+   required string snapshotRoot = 1;
+   optional string snapshotName = 2;
+ }
+ 
+ message CreateSnapshotResponseProto {
+   required string snapshotPath = 1;
+ }
+ 
+ message RenameSnapshotRequestProto {
+   required string snapshotRoot = 1;
+   required string snapshotOldName = 2;
+   required string snapshotNewName = 3;
+ }
+ 
+ message RenameSnapshotResponseProto { // void response
+ }
+ 
+ message AllowSnapshotRequestProto {
+   required string snapshotRoot = 1;
+ }
+ 
+ message AllowSnapshotResponseProto {
+ }
+ 
+ message DisallowSnapshotRequestProto {
+   required string snapshotRoot = 1;
+ }
+ 
+ message DisallowSnapshotResponseProto {
+ }
+ 
+ message DeleteSnapshotRequestProto {
+   required string snapshotRoot = 1;
+   required string snapshotName = 2;
+ }
+ 
+ message DeleteSnapshotResponseProto { // void response
+ }
+ 
+ message CheckAccessRequestProto {
+   required string path = 1;
+   required AclEntryProto.FsActionProto mode = 2;
+ }
+ 
+ message CheckAccessResponseProto { // void response
+ }
+ 
+ message GetCurrentEditLogTxidRequestProto {
+ }
+ 
+ message GetCurrentEditLogTxidResponseProto {
+   required int64 txid = 1;
+ }
+ 
+ message GetEditsFromTxidRequestProto {
+   required int64 txid = 1;
+ }
+ 
+ message GetEditsFromTxidResponseProto {
+   required EventsListProto eventsList = 1;
+ }
+ 
+ service ClientNamenodeProtocol {
+   rpc getBlockLocations(GetBlockLocationsRequestProto)
+       returns(GetBlockLocationsResponseProto);
+   rpc getServerDefaults(GetServerDefaultsRequestProto)
+       returns(GetServerDefaultsResponseProto);
+   rpc create(CreateRequestProto)returns(CreateResponseProto);
+   rpc append(AppendRequestProto) returns(AppendResponseProto);
+   rpc setReplication(SetReplicationRequestProto)
+       returns(SetReplicationResponseProto);
+   rpc setStoragePolicy(SetStoragePolicyRequestProto)
+       returns(SetStoragePolicyResponseProto);
+   rpc getStoragePolicy(GetStoragePolicyRequestProto)
+       returns(GetStoragePolicyResponseProto);
+   rpc getStoragePolicies(GetStoragePoliciesRequestProto)
+       returns(GetStoragePoliciesResponseProto);
+   rpc setPermission(SetPermissionRequestProto)
+       returns(SetPermissionResponseProto);
+   rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);
+   rpc abandonBlock(AbandonBlockRequestProto) returns(AbandonBlockResponseProto);
+   rpc addBlock(AddBlockRequestProto) returns(AddBlockResponseProto);
+   rpc getAdditionalDatanode(GetAdditionalDatanodeRequestProto)
+       returns(GetAdditionalDatanodeResponseProto);
+   rpc complete(CompleteRequestProto) returns(CompleteResponseProto);
+   rpc reportBadBlocks(ReportBadBlocksRequestProto)
+       returns(ReportBadBlocksResponseProto);
+   rpc concat(ConcatRequestProto) returns(ConcatResponseProto);
+   rpc truncate(TruncateRequestProto) returns(TruncateResponseProto);
+   rpc rename(RenameRequestProto) returns(RenameResponseProto);
+   rpc rename2(Rename2RequestProto) returns(Rename2ResponseProto);
+   rpc delete(DeleteRequestProto) returns(DeleteResponseProto);
+   rpc mkdirs(MkdirsRequestProto) returns(MkdirsResponseProto);
+   rpc getListing(GetListingRequestProto) returns(GetListingResponseProto);
+   rpc renewLease(RenewLeaseRequestProto) returns(RenewLeaseResponseProto);
+   rpc recoverLease(RecoverLeaseRequestProto)
+       returns(RecoverLeaseResponseProto);
+   rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
+   rpc getDatanodeReport(GetDatanodeReportRequestProto)
+       returns(GetDatanodeReportResponseProto);
+   rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
+       returns(GetDatanodeStorageReportResponseProto);
+   rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
+       returns(GetPreferredBlockSizeResponseProto);
+   rpc setSafeMode(SetSafeModeRequestProto)
+       returns(SetSafeModeResponseProto);
+   rpc saveNamespace(SaveNamespaceRequestProto)
+       returns(SaveNamespaceResponseProto);
+   rpc rollEdits(RollEditsRequestProto)
+       returns(RollEditsResponseProto);
+   rpc restoreFailedStorage(RestoreFailedStorageRequestProto)
+       returns(RestoreFailedStorageResponseProto);
+   rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
+   rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
+       returns(FinalizeUpgradeResponseProto);
+   rpc rollingUpgrade(RollingUpgradeRequestProto)
+       returns(RollingUpgradeResponseProto);
+   rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
+       returns(ListCorruptFileBlocksResponseProto);
+   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
+   rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
+   rpc addCacheDirective(AddCacheDirectiveRequestProto)
+       returns (AddCacheDirectiveResponseProto);
+   rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)
+       returns (ModifyCacheDirectiveResponseProto);
+   rpc removeCacheDirective(RemoveCacheDirectiveRequestProto)
+       returns (RemoveCacheDirectiveResponseProto);
+   rpc listCacheDirectives(ListCacheDirectivesRequestProto)
+       returns (ListCacheDirectivesResponseProto);
+   rpc addCachePool(AddCachePoolRequestProto)
+       returns(AddCachePoolResponseProto);
+   rpc modifyCachePool(ModifyCachePoolRequestProto)
+       returns(ModifyCachePoolResponseProto);
+   rpc removeCachePool(RemoveCachePoolRequestProto)
+       returns(RemoveCachePoolResponseProto);
+   rpc listCachePools(ListCachePoolsRequestProto)
+       returns(ListCachePoolsResponseProto);
+   rpc getFileLinkInfo(GetFileLinkInfoRequestProto)
+       returns(GetFileLinkInfoResponseProto);
+   rpc getContentSummary(GetContentSummaryRequestProto)
+       returns(GetContentSummaryResponseProto);
+   rpc setQuota(SetQuotaRequestProto) returns(SetQuotaResponseProto);
+   rpc fsync(FsyncRequestProto) returns(FsyncResponseProto);
+   rpc setTimes(SetTimesRequestProto) returns(SetTimesResponseProto);
+   rpc createSymlink(CreateSymlinkRequestProto)
+       returns(CreateSymlinkResponseProto);
+   rpc getLinkTarget(GetLinkTargetRequestProto)
+       returns(GetLinkTargetResponseProto);
+   rpc updateBlockForPipeline(UpdateBlockForPipelineRequestProto)
+       returns(UpdateBlockForPipelineResponseProto);
+   rpc updatePipeline(UpdatePipelineRequestProto)
+       returns(UpdatePipelineResponseProto);
+   rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto)
+       returns(hadoop.common.GetDelegationTokenResponseProto);
+   rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto)
+       returns(hadoop.common.RenewDelegationTokenResponseProto);
+   rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto)
+       returns(hadoop.common.CancelDelegationTokenResponseProto);
+   rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
+       returns(SetBalancerBandwidthResponseProto);
+   rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
+       returns(GetDataEncryptionKeyResponseProto);
+   rpc createSnapshot(CreateSnapshotRequestProto)
+       returns(CreateSnapshotResponseProto);
+   rpc renameSnapshot(RenameSnapshotRequestProto)
+       returns(RenameSnapshotResponseProto);
+   rpc allowSnapshot(AllowSnapshotRequestProto)
+       returns(AllowSnapshotResponseProto);
+   rpc disallowSnapshot(DisallowSnapshotRequestProto)
+       returns(DisallowSnapshotResponseProto);   
+   rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto)
+       returns(GetSnapshottableDirListingResponseProto);
+   rpc deleteSnapshot(DeleteSnapshotRequestProto)
+       returns(DeleteSnapshotResponseProto);
+   rpc getSnapshotDiffReport(GetSnapshotDiffReportRequestProto)
+       returns(GetSnapshotDiffReportResponseProto);
+   rpc isFileClosed(IsFileClosedRequestProto)
+       returns(IsFileClosedResponseProto);
+   rpc modifyAclEntries(ModifyAclEntriesRequestProto)
+       returns(ModifyAclEntriesResponseProto);
+   rpc removeAclEntries(RemoveAclEntriesRequestProto)
+       returns(RemoveAclEntriesResponseProto);
+   rpc removeDefaultAcl(RemoveDefaultAclRequestProto)
+       returns(RemoveDefaultAclResponseProto);
+   rpc removeAcl(RemoveAclRequestProto)
+       returns(RemoveAclResponseProto);
+   rpc setAcl(SetAclRequestProto)
+       returns(SetAclResponseProto);
+   rpc getAclStatus(GetAclStatusRequestProto)
+       returns(GetAclStatusResponseProto);
+   rpc setXAttr(SetXAttrRequestProto)
+       returns(SetXAttrResponseProto);
+   rpc getXAttrs(GetXAttrsRequestProto)
+       returns(GetXAttrsResponseProto);
+   rpc listXAttrs(ListXAttrsRequestProto)
+       returns(ListXAttrsResponseProto);
+   rpc removeXAttr(RemoveXAttrRequestProto)
+       returns(RemoveXAttrResponseProto);
+   rpc checkAccess(CheckAccessRequestProto)
+       returns(CheckAccessResponseProto);
+   rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
+       returns(CreateEncryptionZoneResponseProto);
+   rpc listEncryptionZones(ListEncryptionZonesRequestProto)
+       returns(ListEncryptionZonesResponseProto);
+   rpc getEZForPath(GetEZForPathRequestProto)
+       returns(GetEZForPathResponseProto);
++  rpc createErasureCodingZone(CreateErasureCodingZoneRequestProto)
++      returns(CreateErasureCodingZoneResponseProto);
+   rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto)
+       returns(GetCurrentEditLogTxidResponseProto);
+   rpc getEditsFromTxid(GetEditsFromTxidRequestProto)
+       returns(GetEditsFromTxidResponseProto);
++  rpc getErasureCodingPolicies(GetErasureCodingPoliciesRequestProto)
++      returns(GetErasureCodingPoliciesResponseProto);
++  rpc getErasureCodingZone(GetErasureCodingZoneRequestProto)
++      returns(GetErasureCodingZoneResponseProto);
+ }
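
For context on the createFlag field defined earlier in this file: CreateRequestProto.createFlag carries CreateFlagProto values OR'd together into a single uint32. Below is a minimal client-side sketch, assuming the standard protobuf-generated Java classes implied by the java_package/java_outer_classname options above; the path, client name, permission, replication, and block size are illustrative values, not taken from this change.

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;

    public class CreateRequestSketch {
      public static CreateRequestProto buildCreateRequest() {
        // OR the CreateFlagProto bit values into the uint32 createFlag field.
        int flags = CreateFlagProto.CREATE.getNumber()
            | CreateFlagProto.OVERWRITE.getNumber();          // 0x01 | 0x02
        return CreateRequestProto.newBuilder()
            .setSrc("/user/example/file.txt")                 // hypothetical path
            .setMasked(FsPermissionProto.newBuilder().setPerm(0644).build())
            .setClientName("DFSClient_example")               // hypothetical client name
            .setCreateFlag(flags)
            .setCreateParent(true)
            .setReplication(3)                                // illustrative
            .setBlockSize(128L * 1024 * 1024)                 // illustrative 128 MB
            .build();
      }
    }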

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 0000000,0000000..d27f782
new file mode 100644
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@@ -1,0 -1,0 +1,68 @@@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++ 
++option java_package = "org.apache.hadoop.hdfs.protocol.proto";
++option java_outer_classname = "ErasureCodingProtos";
++option java_generate_equals_and_hash = true;
++package hadoop.hdfs;
++
++import "hdfs.proto";
++
++/**
++ * ErasureCodingZone
++ */
++message ErasureCodingZoneProto {
++  required string dir = 1;
++  required ErasureCodingPolicyProto ecPolicy = 2;
++}
++
++message CreateErasureCodingZoneRequestProto {
++  required string src = 1;
++  optional ErasureCodingPolicyProto ecPolicy = 2;
++}
++
++message CreateErasureCodingZoneResponseProto {
++}
++
++message GetErasureCodingPoliciesRequestProto { // void request
++}
++
++message GetErasureCodingPoliciesResponseProto {
++  repeated ErasureCodingPolicyProto ecPolicies = 1;
++}
++
++message GetErasureCodingZoneRequestProto {
++  required string src = 1; // path to get the zone info
++}
++
++message GetErasureCodingZoneResponseProto {
++  optional ErasureCodingZoneProto ECZone = 1;
++}
++
++/**
++ * Block erasure coding recovery info
++ */
++message BlockECRecoveryInfoProto {
++  required ExtendedBlockProto block = 1;
++  required DatanodeInfosProto sourceDnInfos = 2;
++  required DatanodeInfosProto targetDnInfos = 3;
++  required StorageUuidsProto targetStorageUuids = 4;
++  required StorageTypesProto targetStorageTypes = 5;
++  repeated uint32 liveBlockIndices = 6;
++  required ErasureCodingPolicyProto ecPolicy = 7;
++}
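
The messages above define the wire format for the new erasure coding RPCs added to ClientNamenodeProtocol. A minimal sketch of building the request messages, assuming only the standard protobuf-generated builders for ErasureCodingProtos; the directory and file paths are hypothetical, and leaving the optional ecPolicy field unset is assumed to defer the policy choice to the NameNode.

    import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
    import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingZoneRequestProto;

    public class ErasureCodingZoneRequestSketch {
      public static void main(String[] args) {
        // Request to mark a (hypothetical) directory as an erasure coding zone;
        // ecPolicy is optional, so it is left unset here (assumed NameNode default).
        CreateErasureCodingZoneRequestProto create =
            CreateErasureCodingZoneRequestProto.newBuilder()
                .setSrc("/ecdata")
                .build();

        // Request to look up the zone (if any) that a path belongs to.
        GetErasureCodingZoneRequestProto query =
            GetErasureCodingZoneRequestProto.newBuilder()
                .setSrc("/ecdata/part-00000")
                .build();

        System.out.println(create);
        System.out.println(query);
      }
    }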

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 0000000,86fb462..63fe90c
mode 000000,100644..100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@@ -1,0 -1,611 +1,648 @@@
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ /**
+  * These .proto interfaces are private and stable.
+  * Please see http://wiki.apache.org/hadoop/Compatibility
+  * for what changes are allowed for a *stable* .proto interface.
+  */
+ 
+ // This file contains protocol buffers that are used throughout HDFS -- i.e.
+ // by the client, server, and data transfer protocols.
+ 
+ 
+ option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+ option java_outer_classname = "HdfsProtos";
+ option java_generate_equals_and_hash = true;
+ package hadoop.hdfs;
+ 
+ import "Security.proto";
+ 
+ /**
+  * Extended block identifies a block
+  */
+ message ExtendedBlockProto {
+   required string poolId = 1;   // Block pool id - globally unique across clusters
+   required uint64 blockId = 2;  // the local id within a pool
+   required uint64 generationStamp = 3;
+   optional uint64 numBytes = 4 [default = 0];  // len does not belong in ebid 
+                                                // here for historical reasons
+ }
+ 
+ /**
+  * Identifies a Datanode
+  */
+ message DatanodeIDProto {
+   required string ipAddr = 1;    // IP address
+   required string hostName = 2;  // hostname
+   required string datanodeUuid = 3;     // UUID assigned to the Datanode. For
+                                         // upgraded clusters this is the same
+                                         // as the original StorageID of the
+                                         // Datanode.
+   required uint32 xferPort = 4;  // data streaming port
+   required uint32 infoPort = 5;  // datanode http port
+   required uint32 ipcPort = 6;   // ipc server port
+   optional uint32 infoSecurePort = 7 [default = 0]; // datanode https port
+ }
+ 
+ /**
+  * Datanode local information
+  */
+ message DatanodeLocalInfoProto {
+   required string softwareVersion = 1;
+   required string configVersion = 2;
+   required uint64 uptime = 3;
+ }
+ 
+ /**
+  * DatanodeInfo array
+  */
+ message DatanodeInfosProto {
+   repeated DatanodeInfoProto datanodes = 1;
+ }
+ 
+ /**
+  * The status of a Datanode
+  */
+ message DatanodeInfoProto {
+   required DatanodeIDProto id = 1;
+   optional uint64 capacity = 2 [default = 0];
+   optional uint64 dfsUsed = 3 [default = 0];
+   optional uint64 remaining = 4 [default = 0];
+   optional uint64 blockPoolUsed = 5 [default = 0];
+   optional uint64 lastUpdate = 6 [default = 0];
+   optional uint32 xceiverCount = 7 [default = 0];
+   optional string location = 8;
+   enum AdminState {
+     NORMAL = 0;
+     DECOMMISSION_INPROGRESS = 1;
+     DECOMMISSIONED = 2;
+   }
+ 
+   optional AdminState adminState = 10 [default = NORMAL];
+   optional uint64 cacheCapacity = 11 [default = 0];
+   optional uint64 cacheUsed = 12 [default = 0];
+   optional uint64 lastUpdateMonotonic = 13 [default = 0];
+ }
+ 
+ /**
+  * Represents a storage available on the datanode
+  */
+ message DatanodeStorageProto {
+   enum StorageState {
+     NORMAL = 0;
+     READ_ONLY_SHARED = 1;
+   }
+ 
+   required string storageUuid = 1;
+   optional StorageState state = 2 [default = NORMAL];
+   optional StorageTypeProto storageType = 3 [default = DISK];
+ }
+ 
+ message StorageReportProto {
+   required string storageUuid = 1 [ deprecated = true ];
+   optional bool failed = 2 [ default = false ];
+   optional uint64 capacity = 3 [ default = 0 ];
+   optional uint64 dfsUsed = 4 [ default = 0 ];
+   optional uint64 remaining = 5 [ default = 0 ];
+   optional uint64 blockPoolUsed = 6 [ default = 0 ];
+   optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
+ }
+ 
+ /**
+  * Summary of a file or directory
+  */
+ message ContentSummaryProto {
+   required uint64 length = 1;
+   required uint64 fileCount = 2;
+   required uint64 directoryCount = 3;
+   required uint64 quota = 4;
+   required uint64 spaceConsumed = 5;
+   required uint64 spaceQuota = 6;
+   optional StorageTypeQuotaInfosProto typeQuotaInfos = 7;
+ }
+ 
+ /**
+  * Storage type quota and usage information of a file or directory
+  */
+ message StorageTypeQuotaInfosProto {
+   repeated StorageTypeQuotaInfoProto typeQuotaInfo = 1;
+ }
+ 
+ message StorageTypeQuotaInfoProto {
+   required StorageTypeProto type = 1;
+   required uint64 quota = 2;
+   required uint64 consumed = 3;
+ }
+ 
+ /**
+  * Contains a list of paths corresponding to corrupt files and a cookie
+  * used for iterative calls to NameNode.listCorruptFileBlocks.
+  *
+  */
+ message CorruptFileBlocksProto {
+  repeated string files = 1;
+  required string   cookie = 2;
+ }
+ 
+ /**
+  * File or Directory permission - same spec as posix
+  */
+ message FsPermissionProto {
+   required uint32 perm = 1;       // Actually a short - only 16bits used
+ }
+ 
+ /**
+  * Types of recognized storage media.
+  */
+ enum StorageTypeProto {
+   DISK = 1;
+   SSD = 2;
+   ARCHIVE = 3;
+   RAM_DISK = 4;
+ }
+ 
+ /**
+  * A list of storage types. 
+  */
+ message StorageTypesProto {
+   repeated StorageTypeProto storageTypes = 1;
+ }
+ 
+ /**
+  * Block replica storage policy.
+  */
+ message BlockStoragePolicyProto {
+   required uint32 policyId = 1;
+   required string name = 2;
+   // a list of storage types for storing the block replicas when creating a
+   // block.
+   required StorageTypesProto creationPolicy = 3;
+   // A list of storage types for creation fallback storage.
+   optional StorageTypesProto creationFallbackPolicy = 4;
+   optional StorageTypesProto replicationFallbackPolicy = 5;
+ }
+ 
+ /**
+  * A list of storage IDs. 
+  */
+ message StorageUuidsProto {
+   repeated string storageUuids = 1;
+ }
+ 
+ /**
+  * A LocatedBlock gives information about a block and its location.
+  */ 
+ message LocatedBlockProto {
+   required ExtendedBlockProto b  = 1;
+   required uint64 offset = 2;           // offset of first byte of block in the file
+   repeated DatanodeInfoProto locs = 3;  // Locations ordered by proximity to client ip
+   required bool corrupt = 4;            // true if all replicas of a block are corrupt, else false
+                                         // If block has few corrupt replicas, they are filtered and
+                                         // their locations are not part of this object
+ 
+   required hadoop.common.TokenProto blockToken = 5;
+   repeated bool isCached = 6 [packed=true]; // if a location in locs is cached
+   repeated StorageTypeProto storageTypes = 7;
+   repeated string storageIDs = 8;
++
++  // striped block related fields
++  repeated uint32 blockIndex = 9; // used for striped block to indicate block index for each storage
++  repeated hadoop.common.TokenProto blockTokens = 10; // each internal block has a block token
+ }
+ 
+ message DataEncryptionKeyProto {
+   required uint32 keyId = 1;
+   required string blockPoolId = 2;
+   required bytes nonce = 3;
+   required bytes encryptionKey = 4;
+   required uint64 expiryDate = 5;
+   optional string encryptionAlgorithm = 6;
+ }
+ 
+ /**
+  * Cipher suite.
+  */
+ enum CipherSuiteProto {
+     UNKNOWN = 1;
+     AES_CTR_NOPADDING = 2;
+ }
+ 
+ /**
+  * Crypto protocol version used to access encrypted files.
+  */
+ enum CryptoProtocolVersionProto {
+     UNKNOWN_PROTOCOL_VERSION = 1;
+     ENCRYPTION_ZONES = 2;
+ }
+ 
+ /**
+  * Encryption information for a file.
+  */
+ message FileEncryptionInfoProto {
+   required CipherSuiteProto suite = 1;
+   required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
+   required bytes key = 3;
+   required bytes iv = 4;
+   required string keyName = 5;
+   required string ezKeyVersionName = 6;
+ }
+ 
+ /**
+  * Encryption information for an individual
+  * file within an encryption zone
+  */
+ message PerFileEncryptionInfoProto {
+   required bytes key = 1;
+   required bytes iv = 2;
+   required string ezKeyVersionName = 3;
+ }
+ 
+ /**
+  * Encryption information for an encryption
+  * zone
+  */
+ message ZoneEncryptionInfoProto {
+   required CipherSuiteProto suite = 1;
+   required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
+   required string keyName = 3;
+ }
+ 
+ /**
+  * Cipher option
+  */
+ message CipherOptionProto {
+   required CipherSuiteProto suite = 1;
+   optional bytes inKey = 2;
+   optional bytes inIv = 3;
+   optional bytes outKey = 4;
+   optional bytes outIv = 5;
+ }
+ 
+ /**
+  * A set of file blocks and their locations.
+  */
+ message LocatedBlocksProto {
+   required uint64 fileLength = 1;
+   repeated LocatedBlockProto blocks = 2;
+   required bool underConstruction = 3;
+   optional LocatedBlockProto lastBlock = 4;
+   required bool isLastBlockComplete = 5;
+   optional FileEncryptionInfoProto fileEncryptionInfo = 6;
++
++  // Optional field for erasure coding
++  optional ErasureCodingPolicyProto ecPolicy = 7;
++}
++
++/**
++ * ECSchema options entry
++ */
++message ECSchemaOptionEntryProto {
++  required string key = 1;
++  required string value = 2;
++}
++
++/**
++ * ECSchema for erasurecoding
++ */
++message ECSchemaProto {
++  required string codecName = 1;
++  required uint32 dataUnits = 2;
++  required uint32 parityUnits = 3;
++  repeated ECSchemaOptionEntryProto options = 4;
++}
++
++message ErasureCodingPolicyProto {
++  required string name = 1;
++  required ECSchemaProto schema = 2;
++  required uint32 cellSize = 3;
+ }
+ 
+ /**
+  * Status of a file, directory or symlink
+  * Optionally includes a file's block locations if requested by client on the rpc call.
+  */
+ message HdfsFileStatusProto {
+   enum FileType {
+     IS_DIR = 1;
+     IS_FILE = 2;
+     IS_SYMLINK = 3;
+   }
+   required FileType fileType = 1;
+   required bytes path = 2;          // local name of inode encoded java UTF8
+   required uint64 length = 3;
+   required FsPermissionProto permission = 4;
+   required string owner = 5;
+   required string group = 6;
+   required uint64 modification_time = 7;
+   required uint64 access_time = 8;
+ 
+   // Optional fields for symlink
+   optional bytes symlink = 9;             // if symlink, target encoded java UTF8
+ 
+   // Optional fields for file
+   optional uint32 block_replication = 10 [default = 0]; // only 16bits used
+   optional uint64 blocksize = 11 [default = 0];
+   optional LocatedBlocksProto locations = 12;  // supplied only if asked by client
+ 
+   // Optional field for fileId
+   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
+   optional int32 childrenNum = 14 [default = -1];
+   // Optional field for file encryption
+   optional FileEncryptionInfoProto fileEncryptionInfo = 15;
+ 
+   optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id
 -} 
++
++  // Optional field for erasure coding
++  optional ErasureCodingPolicyProto ecPolicy = 17;
++}
+ 
+ /**
+  * Checksum algorithms/types used in HDFS
+  * Make sure this enum's integer values match enum values' id properties defined
+  * in org.apache.hadoop.util.DataChecksum.Type
+  */
+ enum ChecksumTypeProto {
+   CHECKSUM_NULL = 0;
+   CHECKSUM_CRC32 = 1;
+   CHECKSUM_CRC32C = 2;
+ }
+ 
+ /**
+  * HDFS Server Defaults
+  */
+ message FsServerDefaultsProto {
+   required uint64 blockSize = 1;
+   required uint32 bytesPerChecksum = 2;
+   required uint32 writePacketSize = 3;
+   required uint32 replication = 4; // Actually a short - only 16 bits used
+   required uint32 fileBufferSize = 5;
+   optional bool encryptDataTransfer = 6 [default = false];
+   optional uint64 trashInterval = 7 [default = 0];
+   optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32];
+ }
+ 
+ 
+ /**
+  * Directory listing
+  */
+ message DirectoryListingProto {
+   repeated HdfsFileStatusProto partialListing = 1;
+   required uint32 remainingEntries  = 2;
+ }
+ 
+ /**
+  * Status of a snapshottable directory: besides the normal information for 
+  * a directory status, also include snapshot quota, number of snapshots, and
+  * the full path of the parent directory. 
+  */
+ message SnapshottableDirectoryStatusProto {
+   required HdfsFileStatusProto dirStatus = 1;
+ 
+   // Fields specific for snapshottable directory
+   required uint32 snapshot_quota = 2;
+   required uint32 snapshot_number = 3;
+   required bytes parent_fullpath = 4;
+ }
+ 
+ /**
+  * Snapshottable directory listing
+  */
+ message SnapshottableDirectoryListingProto {
+   repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
+ }
+ 
+ /**
+  * Snapshot diff report entry
+  */
+ message SnapshotDiffReportEntryProto {
+   required bytes fullpath = 1;
+   required string modificationLabel = 2;
+   optional bytes targetPath = 3;
+ }
+ 
+ /**
+  * Snapshot diff report
+  */
+ message SnapshotDiffReportProto {
+   // full path of the directory where snapshots were taken
+   required string snapshotRoot = 1;
+   required string fromSnapshot = 2;
+   required string toSnapshot = 3;
+   repeated SnapshotDiffReportEntryProto diffReportEntries = 4;
+ }
+ 
+ /**
+  * Common node information shared by all the nodes in the cluster
+  */
+ message StorageInfoProto {
+   required uint32 layoutVersion = 1; // Layout version of the file system
+   required uint32 namespceID = 2;    // File system namespace ID
+   required string clusterID = 3;     // ID of the cluster
+   required uint64 cTime = 4;         // File system creation time
+ }
+ 
+ /**
+  * Information sent by a namenode to identify itself to the primary namenode.
+  */
+ message NamenodeRegistrationProto {
+   required string rpcAddress = 1;    // host:port of the namenode RPC address
+   required string httpAddress = 2;   // host:port of the namenode http server
+   enum NamenodeRoleProto {
+     NAMENODE = 1;
+     BACKUP = 2;
+     CHECKPOINT = 3;
+   }
+   required StorageInfoProto storageInfo = 3;  // Node information
+   optional NamenodeRoleProto role = 4 [default = NAMENODE];        // Namenode role
+ }
+ 
+ /**
+  * Unique signature to identify checkpoint transactions.
+  */
+ message CheckpointSignatureProto {
+   required string blockPoolId = 1;
+   required uint64 mostRecentCheckpointTxId = 2;
+   required uint64 curSegmentTxId = 3;
+   required StorageInfoProto storageInfo = 4;
+ }
+ 
+ /**
+  * Command sent from one namenode to another namenode.
+  */
+ message NamenodeCommandProto {
+   enum Type {
+     NamenodeCommand = 0;      // Base command
+     CheckPointCommand = 1;    // Check point command
+   }
+   required uint32 action = 1;
+   required Type type = 2;
+   optional CheckpointCommandProto checkpointCmd = 3;
+ }
+ 
+ /**
+  * Command returned from primary to checkpointing namenode.
+  * This command has checkpoint signature that identifies
+  * checkpoint transaction and is needed for further
+  * communication related to checkpointing.
+  */
+ message CheckpointCommandProto {
+   // Unique signature to identify checkpoint transaction
+   required CheckpointSignatureProto signature = 1; 
+ 
+   // If true, return transfer image to primary upon the completion of checkpoint
+   required bool needToReturnImage = 2;
+ }
+ 
+ /**
+  * Block information
+  *
+  * Please be wary of adding additional fields here, since INodeFiles
+  * need to fit in PB's default max message size of 64MB.
+  * We restrict the max # of blocks per file
+  * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better
+  * to avoid changing this.
+  */
+ message BlockProto {
+   required uint64 blockId = 1;
+   required uint64 genStamp = 2;
+   optional uint64 numBytes = 3 [default = 0];
+ }
+ 
+ /**
+  * Block and datanodes where is it located
+  */
+ message BlockWithLocationsProto {
+   required BlockProto block = 1;   // Block
+   repeated string datanodeUuids = 2; // Datanodes with replicas of the block
+   repeated string storageUuids = 3;  // Storages with replicas of the block
+   repeated StorageTypeProto storageTypes = 4;
++
++  optional bytes indices = 5;
++  optional uint32 dataBlockNum = 6;
+ }
+ 
+ /**
+  * List of block with locations
+  */
+ message BlocksWithLocationsProto {
+   repeated BlockWithLocationsProto blocks = 1;
+ }
+ 
+ /**
+  * Editlog information with available transactions
+  */
+ message RemoteEditLogProto {
+   required uint64 startTxId = 1;  // Starting available edit log transaction
+   required uint64 endTxId = 2;    // Ending available edit log transaction
+   optional bool isInProgress = 3 [default = false];
+ }
+ 
+ /**
+  * Enumeration of editlogs available on a remote namenode
+  */
+ message RemoteEditLogManifestProto {
+   repeated RemoteEditLogProto logs = 1;
+ }
+ 
+ /**
+  * Namespace information that describes namespace on a namenode
+  */
+ message NamespaceInfoProto {
+   required string buildVersion = 1;         // Software revision version (e.g. an svn or git revision)
+   required uint32 unused = 2;               // Retained for backward compatibility
+   required string blockPoolID = 3;          // block pool used by the namespace
+   required StorageInfoProto storageInfo = 4;// Node information
+   required string softwareVersion = 5;      // Software version number (e.g. 2.0.0)
+   optional uint64 capabilities = 6 [default = 0]; // feature flags
+ }
+ 
+ /**
+  * Block access token information
+  */
+ message BlockKeyProto {
+   required uint32 keyId = 1;      // Key identifier
+   required uint64 expiryDate = 2; // Expiry time in milliseconds
+   optional bytes keyBytes = 3;    // Key secret
+ }
+ 
+ /**
+  * Current key and set of block keys at the namenode.
+  */
+ message ExportedBlockKeysProto {
+   required bool isBlockTokenEnabled = 1;
+   required uint64 keyUpdateInterval = 2;
+   required uint64 tokenLifeTime = 3;
+   required BlockKeyProto currentKey = 4;
+   repeated BlockKeyProto allKeys = 5;
+ }
+ 
+ /**
+  * State of a block replica at a datanode
+  */
+ enum ReplicaStateProto {
+   FINALIZED = 0;  // State of a replica when it is not modified
+   RBW = 1;        // State of replica that is being written to
+   RWR = 2;        // State of replica that is waiting to be recovered
+   RUR = 3;        // State of replica that is under recovery
+   TEMPORARY = 4;  // State of replica that is created for replication
+ }
+ 
+ /**
+  * Block that needs to be recovered with at a given location
+  */
+ message RecoveringBlockProto {
+   required uint64 newGenStamp = 1;        // New genstamp post recovery
+   required LocatedBlockProto block = 2;   // Block to be recovered
+   optional BlockProto truncateBlock = 3;  // New block for recovery (truncate)
+ }
+ 
+ /**
+  * void request
+  */
+ message VersionRequestProto {
+ }
+ 
+ /**
+  * Version response from namenode.
+  */
+ message VersionResponseProto {
+   required NamespaceInfoProto info = 1;
+ }
+ 
+ /**
+  * Information related to a snapshot
+  * TODO: add more information
+  */
+ message SnapshotInfoProto {
+   required string snapshotName = 1;
+   required string snapshotRoot = 2;
+   required FsPermissionProto permission = 3;
+   required string owner = 4;
+   required string group = 5;
+   required string createTime = 6;
+   // TODO: do we need access time?
+ }
+ 
+ /**
+  * Rolling upgrade status
+  */
+ message RollingUpgradeStatusProto {
+   required string blockPoolId = 1;
+   optional bool finalized = 2 [default = false];
+ }
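
hdfs.proto above gains ECSchemaProto and ErasureCodingPolicyProto, which LocatedBlocksProto, HdfsFileStatusProto, and the erasure coding messages now reference through optional ecPolicy fields. A sketch of one such policy message when populated, assuming only the standard generated builders; the codec name, 6+3 layout, cell size, policy name, and option entry are illustrative values, not defaults defined by this change.

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaOptionEntryProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ECSchemaProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;

    public class ErasureCodingPolicySketch {
      public static ErasureCodingPolicyProto buildPolicy() {
        // Hypothetical Reed-Solomon schema with 6 data units and 3 parity units.
        ECSchemaProto schema = ECSchemaProto.newBuilder()
            .setCodecName("rs")                                // illustrative codec name
            .setDataUnits(6)
            .setParityUnits(3)
            .addOptions(ECSchemaOptionEntryProto.newBuilder()  // optional key/value entry
                .setKey("example.option")                      // hypothetical option key
                .setValue("true")
                .build())
            .build();

        return ErasureCodingPolicyProto.newBuilder()
            .setName("RS-6-3-64k")              // hypothetical policy name
            .setSchema(schema)
            .setCellSize(64 * 1024)             // illustrative 64 KiB striping cell
            .build();
      }
    }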

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 5ee7f4d,852b040..8b1ede8
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@@ -15,47 -15,46 +15,47 @@@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  
+ MYNAME="${BASH_SOURCE-$0}"
+ 
  function hadoop_usage
  {
-   echo "Usage: hdfs [--config confdir] [--daemon (start|stop|status)]"
-   echo "           [--loglevel loglevel] COMMAND"
-   echo "       where COMMAND is one of:"
-   echo "  balancer             run a cluster balancing utility"
-   echo "  cacheadmin           configure the HDFS cache"
-   echo "  classpath            prints the class path needed to get the"
-   echo "                       Hadoop jar and the required libraries"
-   echo "  crypto               configure HDFS encryption zones"
-   echo "  datanode             run a DFS datanode"
-   echo "  dfs                  run a filesystem command on the file system"
-   echo "  dfsadmin             run a DFS admin client"
-   echo "  erasurecode          configure HDFS erasure coding zones"
-   echo "  fetchdt              fetch a delegation token from the NameNode"
-   echo "  fsck                 run a DFS filesystem checking utility"
-   echo "  getconf              get config values from configuration"
-   echo "  groups               get the groups which users belong to"
-   echo "  haadmin              run a DFS HA admin client"
-   echo "  jmxget               get JMX exported values from NameNode or DataNode."
-   echo "  journalnode          run the DFS journalnode"
-   echo "  lsSnapshottableDir   list all snapshottable dirs owned by the current user"
-   echo "                               Use -help to see options"
-   echo "  mover                run a utility to move block replicas across"
-   echo "                       storage types"
-   echo "  namenode             run the DFS namenode"
-   echo "                               Use -format to initialize the DFS filesystem"
-   echo "  nfs3                 run an NFS version 3 gateway"
-   echo "  oev                  apply the offline edits viewer to an edits file"
-   echo "  oiv                  apply the offline fsimage viewer to an fsimage"
-   echo "  oiv_legacy           apply the offline fsimage viewer to a legacy fsimage"
-   echo "  portmap              run a portmap service"
-   echo "  secondarynamenode    run the DFS secondary namenode"
-   echo "  snapshotDiff         diff two snapshots of a directory or diff the"
-   echo "                       current directory contents with a snapshot"
-   echo "  storagepolicies      list/get/set block storage policies"
-   echo "  version              print the version"
-   echo "  zkfc                 run the ZK Failover Controller daemon"
-   echo ""
-   echo "Most commands print help when invoked w/o parameters."
-   # There are also debug commands, but they don't show up in this listing.
+   hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
+   hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon"
+   hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in slave mode"
+   hadoop_add_option "--loglevel level" "set the log4j level for this command"
+   hadoop_add_option "--hosts filename" "list of hosts to use in slave mode"
+   hadoop_add_option "--slaves" "turn on slave mode"
+ 
+   hadoop_add_subcommand "balancer" "run a cluster balancing utility"
+   hadoop_add_subcommand "cacheadmin" "configure the HDFS cache"
+   hadoop_add_subcommand "classpath" "prints the class path needed to get the hadoop jar and the required libraries"
+   hadoop_add_subcommand "crypto" "configure HDFS encryption zones"
+   hadoop_add_subcommand "datanode" "run a DFS datanode"
+   hadoop_add_subcommand "debug" "run a Debug Admin to execute HDFS debug commands"
+   hadoop_add_subcommand "dfs" "run a filesystem command on the file system"
+   hadoop_add_subcommand "dfsadmin" "run a DFS admin client"
++  hadoop_add_subcommand "erasurecode" "run an HDFS ErasureCoding CLI"
+   hadoop_add_subcommand "fetchdt" "fetch a delegation token from the NameNode"
+   hadoop_add_subcommand "fsck" "run a DFS filesystem checking utility"
+   hadoop_add_subcommand "getconf" "get config values from configuration"
+   hadoop_add_subcommand "groups" "get the groups which users belong to"
+   hadoop_add_subcommand "haadmin" "run a DFS HA admin client"
+   hadoop_add_subcommand "jmxget" "get JMX exported values from NameNode or DataNode."
+   hadoop_add_subcommand "journalnode" "run the DFS journalnode"
+   hadoop_add_subcommand "lsSnapshottableDir" "list all snapshottable dirs owned by the current user"
+   hadoop_add_subcommand "mover" "run a utility to move block replicas across storage types"
+   hadoop_add_subcommand "namenode" "run the DFS namenode"
+   hadoop_add_subcommand "nfs3" "run an NFS version 3 gateway"
+   hadoop_add_subcommand "oev" "apply the offline edits viewer to an edits file"
+   hadoop_add_subcommand "oiv" "apply the offline fsimage viewer to an fsimage"
+   hadoop_add_subcommand "oiv_legacy" "apply the offline fsimage viewer to a legacy fsimage"
+   hadoop_add_subcommand "portmap" "run a portmap service"
+   hadoop_add_subcommand "secondarynamenode" "run the DFS secondary namenode"
+   hadoop_add_subcommand "snapshotDiff" "diff two snapshots of a directory or diff the current directory contents with a snapshot"
+   hadoop_add_subcommand "storagepolicies" "list/get/set block storage policies"
+   hadoop_add_subcommand "version" "print the version"
+   hadoop_add_subcommand "zkfc" "run the ZK Failover Controller daemon"
+   hadoop_generate_usage "${MYNAME}" false
  }
  
  # let's locate libexec...

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index af23d56,1af3a49..5eba08a
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@@ -25,7 -24,6 +24,7 @@@ import org.apache.hadoop.classification
  import org.apache.hadoop.fs.CommonConfigurationKeys;
  import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
- import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolarent;
++import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
  import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
  import org.apache.hadoop.http.HttpConfig;
  
@@@ -435,8 -434,6 +443,8 @@@ public class DFSConfigKeys extends Comm
    public static final Class<BlockPlacementPolicyDefault> DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT = BlockPlacementPolicyDefault.class;
    public static final String  DFS_REPLICATION_MAX_KEY = "dfs.replication.max";
    public static final int     DFS_REPLICATION_MAX_DEFAULT = 512;
 +  public static final String DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY = "dfs.block.placement.ec.classname";
-   public static final Class<BlockPlacementPolicyRackFaultTolarent> DFS_BLOCK_PLACEMENT_EC_CLASSNAME_DEFAULT = BlockPlacementPolicyRackFaultTolarent.class;
++  public static final Class<BlockPlacementPolicyRackFaultTolerant> DFS_BLOCK_PLACEMENT_EC_CLASSNAME_DEFAULT = BlockPlacementPolicyRackFaultTolerant.class;
  
    public static final String  DFS_DF_INTERVAL_KEY = "dfs.df.interval";
    public static final int     DFS_DF_INTERVAL_DEFAULT = 60000;
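
The key and default added above wire in a separate block placement policy for striped (erasure-coded) blocks, falling back to the rack-fault-tolerant policy. A minimal sketch of how such a key is typically resolved through Configuration.getClass; the helper class and method names below are illustrative only and not part of this change:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;

    // Illustrative helper: look up the placement policy class used for
    // erasure-coded blocks, falling back to the default declared above.
    public class EcPlacementPolicyLookup {
      public static Class<? extends BlockPlacementPolicy> resolve(Configuration conf) {
        return conf.getClass(
            DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY,
            DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_DEFAULT,
            BlockPlacementPolicy.class);
      }
    }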

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6c3f0ee,7f3722f..35c4f9a
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@@ -1139,13 -1140,28 +1139,14 @@@ implements ByteBufferReadable, CanSetDr
    }
  
    /**
 -   * Used when reading contiguous blocks
 -   */
 -  private void actualGetFromOneDataNode(final DNAddrPair datanode,
 -      LocatedBlock block, final long start, final long end, byte[] buf,
 -      int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
 -      throws IOException {
 -    final int length = (int) (end - start + 1);
 -    actualGetFromOneDataNode(datanode, block, start, end, buf,
 -        new int[]{offset}, new int[]{length}, corruptedBlockMap);
 -  }
 -
 -  /**
     * Read data from one DataNode.
--   * @param datanode the datanode from which to read data
--   * @param block the located block containing the requested data
--   * @param startInBlk the startInBlk offset of the block
--   * @param endInBlk the endInBlk offset of the block
--   * @param buf the given byte array into which the data is read
-    * @param offset the offset in buf
 -   * @param offsets the data may be read into multiple segments of the buf
 -   *                (when reading a striped block). this array indicates the
 -   *                offset of each buf segment.
 -   * @param lengths the length of each buf segment
++   *
++   * @param datanode          the datanode from which to read data
++   * @param block             the located block containing the requested data
++   * @param startInBlk        the startInBlk offset of the block
++   * @param endInBlk          the endInBlk offset of the block
++   * @param buf               the given byte array into which the data is read
++   * @param offset            the offset in buf
     * @param corruptedBlockMap map recording list of datanodes with corrupted
     *                          block replica
     */
@@@ -1188,7 -1208,7 +1189,7 @@@
          throw new IOException(msg);
        } catch (IOException e) {
        if (e instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
--          DFSClient.LOG.info("Will fetch a new encryption key and retry, " 
++          DFSClient.LOG.info("Will fetch a new encryption key and retry, "
              + "encryption key was invalid when connecting to " + datanode.addr
                + " : " + e);
            // The encryption key used is invalid.
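
The javadoc rewritten above documents the positioned-read helper; client code reaches it through the ordinary pread API on FSDataInputStream. A rough usage sketch, with a made-up path and buffer size for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PreadExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        byte[] buf = new byte[4096];
        try (FSDataInputStream in = fs.open(new Path("/tmp/example"))) {
          // Positioned read: for HDFS this ends up in DFSInputStream, which
          // fetches the requested range from one datanode per located block.
          int nRead = in.read(0L, buf, 0, buf.length);
          System.out.println("read " + nRead + " bytes");
        }
      }
    }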

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b6a63bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --cc hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 373ebdf,c16aef2..00f3a65
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@@ -137,7 -136,7 +136,7 @@@ public class DFSOutputStream extends FS
      }
  
      return new DFSPacket(buf, chunksPerPkt, offsetInBlock, seqno,
--                         getChecksumSize(), lastPacketInBlock);
++        getChecksumSize(), lastPacketInBlock);
    }
  
    @Override
@@@ -166,7 -165,7 +165,7 @@@
      return value;
    }
  
--  /** 
++  /**
     * @return the object for computing checksum.
     *         The type is NULL if checksum is not computed.
     */
@@@ -179,7 -178,7 +178,7 @@@
      }
      return checksum;
    }
-- 
++
    private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
        HdfsFileStatus stat, DataChecksum checksum) throws IOException {
      super(getChecksum4Compute(checksum, stat));
@@@ -195,7 -194,7 +194,7 @@@
        DFSClient.LOG.debug(
            "Set non-null progress callback on DFSOutputStream " + src);
      }
--    
++
      this.bytesPerChecksum = checksum.getBytesPerChecksum();
      if (bytesPerChecksum <= 0) {
        throw new HadoopIllegalArgumentException(
@@@ -289,7 -282,7 +288,7 @@@
    private DFSOutputStream(DFSClient dfsClient, String src,
      EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
        HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
--          throws IOException {
++      throws IOException {
      this(dfsClient, src, progress, stat, checksum);
      initialFileSize = stat.getLen(); // length of file when opened
      this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);
@@@ -357,9 -350,6 +356,9 @@@
        String[] favoredNodes) throws IOException {
      TraceScope scope =
          dfsClient.getPathTraceScope("newStreamForAppend", src);
-       if(stat.getReplication() == 0) {
++    if(stat.getErasureCodingPolicy() != null) {
 +      throw new IOException("Not support appending to a striping layout file yet.");
 +    }
      try {
        final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
            progress, lastBlock, stat, checksum, favoredNodes);
@@@ -405,10 -395,10 +404,10 @@@
      }
  
      if (currentPacket == null) {
-       currentPacket = createPacket(packetSize, chunksPerPacket, 
-           streamer.getBytesCurBlock(), streamer.getAndIncCurrentSeqno(), false);
+       currentPacket = createPacket(packetSize, chunksPerPacket, getStreamer()
+           .getBytesCurBlock(), getStreamer().getAndIncCurrentSeqno(), false);
        if (DFSClient.LOG.isDebugEnabled()) {
--        DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" + 
++        DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" +
              currentPacket.getSeqno() +
              ", src=" + src +
              ", packetSize=" + packetSize +
@@@ -420,11 -410,12 +419,11 @@@
      currentPacket.writeChecksum(checksum, ckoff, cklen);
      currentPacket.writeData(b, offset, len);
      currentPacket.incNumChunks();
-     streamer.incBytesCurBlock(len);
+     getStreamer().incBytesCurBlock(len);
  
      // If packet is full, enqueue it for transmission
 -    //
      if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
-         streamer.getBytesCurBlock() == blockSize) {
+         getStreamer().getBytesCurBlock() == blockSize) {
        enqueueCurrentPacketFull();
      }
    }
@@@ -435,13 -426,10 +434,10 @@@
    }
  
    void enqueueCurrentPacketFull() throws IOException {
-     if (LOG.isDebugEnabled()) {
-       LOG.debug("enqueue full " + currentPacket + ", src=" + src
-           + ", bytesCurBlock=" + streamer.getBytesCurBlock()
-           + ", blockSize=" + blockSize
-           + ", appendChunk=" + streamer.getAppendChunk()
-           + ", " + streamer);
-     }
+     LOG.debug("enqueue full {}, src={}, bytesCurBlock={}, blockSize={},"
 -        + " appendChunk={}, {}", currentPacket, src, getStreamer()
 -        .getBytesCurBlock(), blockSize, getStreamer().getAppendChunk(),
++            + " appendChunk={}, {}", currentPacket, src, getStreamer()
++            .getBytesCurBlock(), blockSize, getStreamer().getAppendChunk(),
+         getStreamer());
      enqueueCurrentPacket();
      adjustChunkBoundary();
      endBlock();
@@@ -487,7 -475,7 +483,7 @@@
        lastFlushOffset = 0;
      }
    }
--  
++
    /**
     * Flushes out to all replicas of the block. The data is in the buffers
     * of the DNs but not necessarily in the DN's OS buffers.
@@@ -519,16 -507,16 +515,16 @@@
        scope.close();
      }
    }
--  
++
    /**
     * The expected semantics is all data have flushed out to all replicas 
     * and all replicas have done posix fsync equivalent - ie the OS has 
     * flushed it to the disk device (but the disk may have it in its cache).
--   * 
++   *
     * Note that only the current block is flushed to the disk device.
     * To guarantee durable sync across block boundaries the stream should
     * be created with {@link CreateFlag#SYNC_BLOCK}.
--   * 
++   *
     * @param syncFlags
     *          Indicate the semantic of the sync. Currently used to specify
     *          whether or not to update the block length in NameNode.
@@@ -545,7 -533,7 +541,7 @@@
  
    /**
     * Flush/Sync buffered data to DataNodes.
--   * 
++   *
     * @param isSync
     *          Whether or not to require all replicas to flush data to the disk
     *          device
@@@ -686,7 -679,7 +687,7 @@@
    /**
     * Note that this is not a public API;
     * use {@link HdfsDataOutputStream#getCurrentBlockReplication()} instead.
--   * 
++   *
     * @return the number of valid replicas of the current block
     */
    public synchronized int getCurrentBlockReplication() throws IOException {
@@@ -701,7 -694,7 +702,7 @@@
      }
      return currentNodes.length;
    }
--  
++
    /**
     * Waits till all existing data is flushed and confirmations 
     * received from datanodes. 
@@@ -723,9 -716,9 +724,9 @@@
    }
  
    protected synchronized void start() {
-     streamer.start();
+     getStreamer().start();
    }
--  
++
    /**
     * Aborts this output stream and releases any system 
     * resources associated with this stream.
@@@ -763,7 -756,7 +764,7 @@@
        setClosed();
      }
    }
--  
++
    /**
     * Closes this output stream and releases any system 
     * resources associated with this stream.
@@@ -894,7 -887,7 +895,7 @@@
      do {
        prevStrategy = this.cachingStrategy.get();
        nextStrategy = new CachingStrategy.Builder(prevStrategy).
--                        setDropBehind(dropBehind).build();
++          setDropBehind(dropBehind).build();
      } while (!this.cachingStrategy.compareAndSet(prevStrategy, nextStrategy));
    }
  
@@@ -908,8 -901,10 +909,15 @@@
      return fileId;
    }
  
+   /**
+    * Returns the data streamer object.
+    */
+   protected DataStreamer getStreamer() {
+     return streamer;
+   }
++
 +  @Override
 +  public String toString() {
 +    return getClass().getSimpleName() + ":" + streamer;
 +  }
  }
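
One user-visible effect of the DFSOutputStream changes is the guard in newStreamForAppend: appending to a file with a striped (erasure-coded) layout now fails fast with an IOException. A rough client-side sketch of what that looks like; the path and error handling are illustrative only, not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendToEcFile {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/ec-zone/data.log");  // assumed to carry an erasure coding policy
        try (FSDataOutputStream out = fs.append(file)) {
          out.writeBytes("more data\n");
        } catch (IOException e) {
          // With this change the append is rejected before any data is sent,
          // because the file status carries an erasure coding policy.
          System.err.println("append rejected: " + e.getMessage());
        }
      }
    }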
