hadoop git commit: YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference. Contributed by Chengbing Liu.

2015-06-12 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk 83e8110f8 -> d8dcfa98e


YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference.
Contributed by Chengbing Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8dcfa98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8dcfa98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8dcfa98

Branch: refs/heads/trunk
Commit: d8dcfa98e3ca6a6fea414fd503589bb83b7a9c51
Parents: 83e8110
Author: Devaraj K deva...@apache.org
Authored: Fri Jun 12 13:42:49 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Fri Jun 12 13:42:49 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/resourcemanager/TestRMEmbeddedElector.java   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8dcfa98/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dc43ad8..f5780c9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -521,6 +521,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3778. Fix Yarn resourcemanger CLI usage. (Brahma Reddy Battula via 
xgong)
 
+YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference.
+(Chengbing Liu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8dcfa98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 1b0bf7e..20b1c0e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -102,9 +102,9 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 ServiceFailedException {
   try {
 callbackCalled.set(true);
-LOG.info("Callback called. Sleeping now");
+TestRMEmbeddedElector.LOG.info("Callback called. Sleeping now");
 Thread.sleep(delayMs);
-LOG.info("Sleep done");
+TestRMEmbeddedElector.LOG.info("Sleep done");
   } catch (InterruptedException e) {
 e.printStackTrace();
   }



hadoop git commit: YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference. Contributed by Chengbing Liu.

2015-06-12 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f1ee2eaf8 -> 3863342ca


YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference.
Contributed by Chengbing Liu.

(cherry picked from commit d8dcfa98e3ca6a6fea414fd503589bb83b7a9c51)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3863342c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3863342c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3863342c

Branch: refs/heads/branch-2
Commit: 3863342ca91494a311cf5524f0494a66a6fca842
Parents: f1ee2ea
Author: Devaraj K deva...@apache.org
Authored: Fri Jun 12 13:42:49 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Fri Jun 12 13:45:05 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/resourcemanager/TestRMEmbeddedElector.java   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3863342c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4a83ed5..30035c7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -473,6 +473,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3778. Fix Yarn resourcemanger CLI usage. (Brahma Reddy Battula via 
xgong)
 
+YARN-3794. TestRMEmbeddedElector fails because of ambiguous LOG reference.
+(Chengbing Liu via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3863342c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 1b0bf7e..20b1c0e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -102,9 +102,9 @@ public class TestRMEmbeddedElector extends 
ClientBaseWithFixes {
 ServiceFailedException {
   try {
 callbackCalled.set(true);
-LOG.info("Callback called. Sleeping now");
+TestRMEmbeddedElector.LOG.info("Callback called. Sleeping now");
 Thread.sleep(delayMs);
-LOG.info("Sleep done");
+TestRMEmbeddedElector.LOG.info("Sleep done");
   } catch (InterruptedException e) {
 e.printStackTrace();
   }



hadoop git commit: HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from StripedBlockProto. Contributed by Yi Liu.

2015-06-12 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 98d340745 -> 683332b36


HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from 
StripedBlockProto. Contributed by Yi Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/683332b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/683332b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/683332b3

Branch: refs/heads/HDFS-7285
Commit: 683332b36de1040eb8901d676e666527e8c5f8fe
Parents: 98d3407
Author: Jing Zhao ji...@apache.org
Authored: Fri Jun 12 14:48:53 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Fri Jun 12 14:48:53 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 16 --
 .../server/namenode/FSImageFormatPBINode.java   | 23 ++--
 .../tools/offlineImageViewer/FSImageLoader.java | 20 ++---
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  2 +-
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 10 -
 ...TestOfflineImageViewerWithStripedBlocks.java | 14 +---
 7 files changed, 23 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/683332b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index fa39d72..2eb8259 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -296,3 +296,6 @@
 
 HDFS-8450. Erasure Coding: Consolidate erasure coding zone related
 implementation into a single class (Rakesh R via vinayakumarb)
+
+HDFS-8585. Erasure Coding: Remove dataBlockNum and parityBlockNum from
+StripedBlockProto. (Yi Liu via jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/683332b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 0bfc3bb..7ee6112 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -183,7 +183,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto;
@@ -195,7 +194,6 @@ import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
@@ -444,20 +442,6 @@ public class PBHelper {
 return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
-  public static BlockInfoStriped convert(StripedBlockProto p, ECSchema schema) 
{
-return new BlockInfoStriped(convert(p.getBlock()), schema);
-  }
-
-  public static StripedBlockProto convert(BlockInfoStriped blk) {
-BlockProto bp = BlockProto.newBuilder().setBlockId(blk.getBlockId())
-.setGenStamp(blk.getGenerationStamp()).setNumBytes(blk.getNumBytes())
-.build();
-return StripedBlockProto.newBuilder()
-.setDataBlockNum(blk.getDataBlockNum())
-.setParityBlockNum(blk.getParityBlockNum())
-.setBlock(bp).build();
-  }
-
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
 BlockWithLocationsProto.Builder builder = BlockWithLocationsProto
 .newBuilder().setBlock(convert(blk.getBlock()))


hadoop git commit: HDFS-8572. DN always uses HTTP/localhost@REALM principals in SPNEGO. Contributed by Haohui Mai.

2015-06-12 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk c17439c2d -> eef7b50e2


HDFS-8572. DN always uses HTTP/localhost@REALM principals in SPNEGO. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eef7b50e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eef7b50e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eef7b50e

Branch: refs/heads/trunk
Commit: eef7b50e23f9960e4bb61d9db6754a2300bc06eb
Parents: c17439c
Author: Haohui Mai whe...@apache.org
Authored: Thu Jun 11 18:53:29 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Fri Jun 12 14:45:34 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 36 ++
 .../server/datanode/web/DatanodeHttpServer.java | 52 ++--
 3 files changed, 55 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eef7b50e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 033451e..51a0897 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -996,6 +996,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8583. Document that NFS gateway does not work with rpcbind
 on SLES 11. (Arpit Agarwal)
 
+HDFS-8572. DN always uses HTTP/localhost@REALM principals in SPNEGO.
+(wheat9)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eef7b50e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f73eb66..ed2925b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
+
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
@@ -148,7 +148,6 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import 
org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
@@ -163,7 +162,6 @@ import 
org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -180,7 +178,6 @@ import org.apache.hadoop.security.SaslPropertiesResolver;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.tracing.SpanReceiverHost;
@@ -299,7 +296,6 @@ public class DataNode extends ReconfigurableBase
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
 
-  private HttpServer2 infoServer = null;
   private DatanodeHttpServer httpServer = null;
   private int infoPort;
   private int infoSecurePort;
@@ -761,29 +757,12 @@ public class DataNode extends ReconfigurableBase
*/
   private void startInfoServer(Configuration conf)
 throws IOException {
-Configuration confForInfoServer = new Configuration(conf);
-confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
-HttpServer2.Builder builder 

hadoop git commit: HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to hadoop-common. Contributed by Masatake Iwasaki.

2015-06-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk d8dcfa98e -> e4489d97e


HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to 
hadoop-common. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4489d97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4489d97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4489d97

Branch: refs/heads/trunk
Commit: e4489d97e5f0cec601b93e1b883fa194a7353229
Parents: d8dcfa9
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Jun 12 07:25:15 2015 -0700
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri Jun 12 07:25:15 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/tracing/SetSpanReceiver.java  | 109 +++
 .../org/apache/hadoop/tracing/TestTracing.java  |  94 ++--
 .../TestTracingShortCircuitLocalRead.java   |   4 +-
 4 files changed, 124 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4489d97/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4df6112..268b1db 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -645,6 +645,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12055. Deprecate usage of NativeIO#link. (Andrew Wang via cnauroth)
 
+HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to
+hadoop-common. (Masatake Iwasaki via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4489d97/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
new file mode 100644
index 000..e242b74
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tracing;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.htrace.Span;
+import org.apache.htrace.SpanReceiver;
+import org.apache.htrace.HTraceConfiguration;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
+
+/**
+ * Span receiver that puts all spans into a single set.
+ * This is useful for testing.
+ * <p/>
+ * We're not using HTrace's POJOReceiver here so as that doesn't
+ * push all the metrics to a static place, and would make testing
+ * SpanReceiverHost harder.
+ */
+public class SetSpanReceiver implements SpanReceiver {
+
+  public SetSpanReceiver(HTraceConfiguration conf) {
+  }
+
+  public void receiveSpan(Span span) {
+SetHolder.spans.put(span.getSpanId(), span);
+  }
+
+  public void close() {
+  }
+
+  public static void clear() {
+SetHolder.spans.clear();
+  }
+
+  public static int size() {
+return SetHolder.spans.size();
+  }
+
+  public static Collection<Span> getSpans() {
+return SetHolder.spans.values();
+  }
+
+  public static Map<String, List<Span>> getMap() {
+return SetHolder.getMap();
+  }
+
+  public static class SetHolder {
+public static ConcurrentHashMap<Long, Span> spans =
+new ConcurrentHashMap<Long, Span>();
+
+public static Map<String, List<Span>> getMap() {
+  Map<String, List<Span>> map = new HashMap<String, List<Span>>();
+
+  for 

hadoop git commit: HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to hadoop-common. Contributed by Masatake Iwasaki.

2015-06-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3863342ca -> e397cca45


HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to 
hadoop-common. Contributed by Masatake Iwasaki.

(cherry picked from commit e4489d97e5f0cec601b93e1b883fa194a7353229)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e397cca4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e397cca4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e397cca4

Branch: refs/heads/branch-2
Commit: e397cca4569e17529a51d95142ebc1cf4c00e31b
Parents: 3863342
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Jun 12 07:25:15 2015 -0700
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri Jun 12 07:26:05 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../apache/hadoop/tracing/SetSpanReceiver.java  | 109 +++
 .../org/apache/hadoop/tracing/TestTracing.java  |  94 ++--
 .../TestTracingShortCircuitLocalRead.java   |   4 +-
 4 files changed, 124 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e397cca4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8b5f218..70f7da5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -152,6 +152,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-12055. Deprecate usage of NativeIO#link. (Andrew Wang via cnauroth)
 
+HADOOP-11971. Move test utilities for tracing from hadoop-hdfs to
+hadoop-common. (Masatake Iwasaki via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e397cca4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
new file mode 100644
index 000..e242b74
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/tracing/SetSpanReceiver.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tracing;
+
+import com.google.common.base.Supplier;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.htrace.Span;
+import org.apache.htrace.SpanReceiver;
+import org.apache.htrace.HTraceConfiguration;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeoutException;
+import org.junit.Assert;
+
+/**
+ * Span receiver that puts all spans into a single set.
+ * This is useful for testing.
+ * <p/>
+ * We're not using HTrace's POJOReceiver here so as that doesn't
+ * push all the metrics to a static place, and would make testing
+ * SpanReceiverHost harder.
+ */
+public class SetSpanReceiver implements SpanReceiver {
+
+  public SetSpanReceiver(HTraceConfiguration conf) {
+  }
+
+  public void receiveSpan(Span span) {
+SetHolder.spans.put(span.getSpanId(), span);
+  }
+
+  public void close() {
+  }
+
+  public static void clear() {
+SetHolder.spans.clear();
+  }
+
+  public static int size() {
+return SetHolder.spans.size();
+  }
+
+  public static Collection<Span> getSpans() {
+return SetHolder.spans.values();
+  }
+
+  public static Map<String, List<Span>> getMap() {
+return SetHolder.getMap();
+  }
+
+  public static class SetHolder {
+public static ConcurrentHashMap<Long, Span> spans =
+new ConcurrentHashMap<Long, Span>();
+
+public static Map<String, List<Span>> getMap() {

[1/2] hadoop git commit: HDFS-7923. The DataNodes should rate-limit their full block reports by asking the NN on heartbeat messages (cmccabe)

2015-06-12 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e397cca45 -> 378bb484b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/378bb484/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 92c329e..ff70c3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -109,7 +109,7 @@ public class TestDeadDatanode {
 BlockListAsLongs.EMPTY) };
 try {
   dnp.blockReport(reg, poolId, report,
-  new BlockReportContext(1, 0, System.nanoTime()));
+  new BlockReportContext(1, 0, System.nanoTime(), 0L));
  fail("Expected IOException is not thrown");
 } catch (IOException ex) {
   // Expected
@@ -120,8 +120,8 @@ public class TestDeadDatanode {
 StorageReport[] rep = { new StorageReport(
 new DatanodeStorage(reg.getDatanodeUuid()),
 false, 0, 0, 0, 0) };
-DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null)
-.getCommands();
+DatanodeCommand[] cmd =
+dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
 assertEquals(1, cmd.length);
 assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
 .getAction());



[1/2] hadoop git commit: HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.

2015-06-12 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 378bb484b -> 2776255ed


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2776255e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 6b8388e..d081a6b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -22,7 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -133,7 +133,7 @@ public class FileDiffList extends
 Block dontRemoveBlock = null;
 if (lastBlock != null && lastBlock.getBlockUCState().equals(
 HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
-  dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+  dontRemoveBlock = ((BlockInfoUnderConstruction) lastBlock)
   .getTruncateBlock();
 }
 // Collect the remaining blocks of the file, ignoring truncate block

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2776255e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 6627281..1011913 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -109,7 +109,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -1620,9 +1620,9 @@ public class DFSTestUtil {
 BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
assertTrue("Block " + blk + " should be under construction, " +
"got: " + storedBlock,
-storedBlock instanceof BlockInfoContiguousUnderConstruction);
-BlockInfoContiguousUnderConstruction ucBlock =
-  (BlockInfoContiguousUnderConstruction)storedBlock;
+storedBlock instanceof BlockInfoUnderConstruction);
+BlockInfoUnderConstruction ucBlock =
+  (BlockInfoUnderConstruction)storedBlock;
 // We expect that the replica with the most recent heart beat will be
 // the one to be in charge of the synchronization / recovery protocol.
 final DatanodeStorageInfo[] storages = 
ucBlock.getExpectedStorageLocations();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2776255e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index a7ba293..630cd1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import 

[1/2] hadoop git commit: HDFS-7923. The DataNodes should rate-limit their full block reports by asking the NN on heartbeat messages (cmccabe)

2015-06-12 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk e4489d97e -> 12b5b06c0


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b5b06c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 92c329e..ff70c3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -109,7 +109,7 @@ public class TestDeadDatanode {
 BlockListAsLongs.EMPTY) };
 try {
   dnp.blockReport(reg, poolId, report,
-  new BlockReportContext(1, 0, System.nanoTime()));
+  new BlockReportContext(1, 0, System.nanoTime(), 0L));
   fail("Expected IOException is not thrown");
 } catch (IOException ex) {
   // Expected
@@ -120,8 +120,8 @@ public class TestDeadDatanode {
 StorageReport[] rep = { new StorageReport(
 new DatanodeStorage(reg.getDatanodeUuid()),
 false, 0, 0, 0, 0) };
-DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null)
-.getCommands();
+DatanodeCommand[] cmd =
+dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
 assertEquals(1, cmd.length);
 assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
 .getAction());



[2/2] hadoop git commit: HDFS-7923. The DataNodes should rate-limit their full block reports by asking the NN on heartbeat messages (cmccabe)

2015-06-12 Thread cmccabe
HDFS-7923. The DataNodes should rate-limit their full block reports by asking 
the NN on heartbeat messages (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/12b5b06c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/12b5b06c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/12b5b06c

Branch: refs/heads/trunk
Commit: 12b5b06c063d93e6c683c9b6fac9a96912f59e59
Parents: e4489d9
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Jun 12 11:17:51 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Fri Jun 12 11:17:51 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 +
 .../DatanodeProtocolClientSideTranslatorPB.java |   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   3 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   3 +-
 .../server/blockmanagement/BlockManager.java|  41 ++-
 .../BlockManagerFaultInjector.java  |  52 +++
 .../BlockReportLeaseManager.java| 355 +++
 .../server/blockmanagement/DatanodeManager.java |   2 +
 .../hdfs/server/datanode/BPServiceActor.java|  71 +++-
 .../hadoop/hdfs/server/datanode/DNConf.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   9 +-
 .../server/protocol/BlockReportContext.java |  25 +-
 .../hdfs/server/protocol/DatanodeProtocol.java  |   5 +-
 .../hdfs/server/protocol/HeartbeatResponse.java |  10 +-
 .../hdfs/server/protocol/RegisterCommand.java   |   2 +-
 .../src/main/proto/DatanodeProtocol.proto   |   6 +
 .../src/main/resources/hdfs-default.xml |  21 ++
 .../hdfs/protocol/TestBlockListAsLongs.java |   4 +-
 .../TestBlockReportRateLimiting.java| 246 +
 .../blockmanagement/TestDatanodeManager.java|  21 +-
 .../TestNameNodePrunesMissingStorages.java  |   2 +-
 .../server/datanode/TestBPOfferService.java |   7 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   2 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   6 +-
 .../datanode/TestBpServiceActorScheduler.java   |   2 +-
 .../TestDatanodeProtocolRetryPolicy.java|   8 +-
 .../server/datanode/TestFsDatasetCache.java |   9 +-
 .../TestNNHandlesBlockReportPerStorage.java |   2 +-
 .../TestNNHandlesCombinedBlockReport.java   |   2 +-
 .../hdfs/server/datanode/TestStorageReport.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   2 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   6 +-
 34 files changed, 890 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b5b06c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 5bb6e53..3f72608 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -434,6 +434,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int     DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
   public static final String  DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY = "dfs.blockreport.split.threshold";
   public static final long    DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT = 1000 * 1000;
+  public static final String  DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES = "dfs.namenode.max.full.block.report.leases";
+  public static final int     DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES_DEFAULT = 6;
+  public static final String  DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS = "dfs.namenode.full.block.report.lease.length.ms";
+  public static final long    DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS_DEFAULT = 5L * 60L * 1000L;
   public static final String  DFS_CACHEREPORT_INTERVAL_MSEC_KEY = "dfs.cachereport.intervalMsec";
   public static final long    DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 1000;
   public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/12b5b06c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 

hadoop git commit: Add HDFS-7923 to CHANGES.txt

2015-06-12 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 12b5b06c0 -> 46b0b4179


Add HDFS-7923 to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46b0b417
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46b0b417
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46b0b417

Branch: refs/heads/trunk
Commit: 46b0b4179c1ef1a1510eb04e40b11968a24df485
Parents: 12b5b06
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Jun 12 11:28:18 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Fri Jun 12 11:28:18 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46b0b417/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b388f69..e315db6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -623,6 +623,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8573. Move creation of restartMeta file logic from BlockReceiver to
 ReplicaInPipeline. (Eddy Xu via wang)
 
+HDFS-7923. The DataNodes should rate-limit their full block reports by
+asking the NN on heartbeat messages (cmccabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than



[2/2] hadoop git commit: HDFS-7923. The DataNodes should rate-limit their full block reports by asking the NN on heartbeat messages (cmccabe)

2015-06-12 Thread cmccabe
HDFS-7923. The DataNodes should rate-limit their full block reports by asking 
the NN on heartbeat messages (cmccabe)

(cherry picked from commit 12b5b06c063d93e6c683c9b6fac9a96912f59e59)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/378bb484
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/378bb484
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/378bb484

Branch: refs/heads/branch-2
Commit: 378bb484bbcb1921729cf88e718cccfc8986a716
Parents: e397cca
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Jun 12 11:17:51 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Fri Jun 12 11:29:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 +
 .../DatanodeProtocolClientSideTranslatorPB.java |   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   3 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   3 +-
 .../server/blockmanagement/BlockManager.java|  41 ++-
 .../BlockManagerFaultInjector.java  |  52 +++
 .../BlockReportLeaseManager.java| 355 +++
 .../server/blockmanagement/DatanodeManager.java |   2 +
 .../hdfs/server/datanode/BPServiceActor.java|  71 +++-
 .../hadoop/hdfs/server/datanode/DNConf.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   9 +-
 .../server/protocol/BlockReportContext.java |  25 +-
 .../hdfs/server/protocol/DatanodeProtocol.java  |   5 +-
 .../hdfs/server/protocol/HeartbeatResponse.java |  10 +-
 .../hdfs/server/protocol/RegisterCommand.java   |   2 +-
 .../src/main/proto/DatanodeProtocol.proto   |   6 +
 .../src/main/resources/hdfs-default.xml |  21 ++
 .../hdfs/protocol/TestBlockListAsLongs.java |   4 +-
 .../TestBlockReportRateLimiting.java| 246 +
 .../blockmanagement/TestDatanodeManager.java|  21 +-
 .../TestNameNodePrunesMissingStorages.java  |   2 +-
 .../server/datanode/TestBPOfferService.java |   7 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   2 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   6 +-
 .../datanode/TestBpServiceActorScheduler.java   |   2 +-
 .../TestDatanodeProtocolRetryPolicy.java|   8 +-
 .../server/datanode/TestFsDatasetCache.java |   9 +-
 .../TestNNHandlesBlockReportPerStorage.java |   2 +-
 .../TestNNHandlesCombinedBlockReport.java   |   2 +-
 .../hdfs/server/datanode/TestStorageReport.java |   2 +-
 .../server/namenode/NNThroughputBenchmark.java  |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   2 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   6 +-
 35 files changed, 893 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/378bb484/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0368f29..39dee78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -283,6 +283,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8573. Move creation of restartMeta file logic from BlockReceiver to
 ReplicaInPipeline. (Eddy Xu via wang)
 
+HDFS-7923. The DataNodes should rate-limit their full block reports by
+asking the NN on heartbeat messages (cmccabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/378bb484/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index dc0ecca..5295a2f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -436,6 +436,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final int     DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
   public static final String  DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY = "dfs.blockreport.split.threshold";
   public static final long    DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT = 1000 * 1000;
+  public static final String  DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES = "dfs.namenode.max.full.block.report.leases";
+  public static final int     DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES_DEFAULT = 6;
+  public static final 

[2/2] hadoop git commit: HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.

2015-06-12 Thread wang
HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. 
Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c17439c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c17439c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c17439c2

Branch: refs/heads/trunk
Commit: c17439c2ddd921b63b1635e6f1cba634b8da8557
Parents: 46b0b41
Author: Andrew Wang w...@apache.org
Authored: Fri Jun 12 11:35:39 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Fri Jun 12 11:38:39 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockCollection.java |   2 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  |  24 +-
 .../blockmanagement/BlockInfoContiguous.java|  77 +---
 .../BlockInfoContiguousUnderConstruction.java   | 403 --
 .../BlockInfoUnderConstruction.java | 405 +++
 .../BlockInfoUnderConstructionContiguous.java   | 110 +
 .../server/blockmanagement/BlockManager.java|  40 +-
 .../ContiguousBlockStorageOp.java   | 106 +
 .../blockmanagement/DatanodeDescriptor.java |  13 +-
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  15 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  15 +-
 .../hdfs/server/namenode/FSImageFormat.java |   7 +-
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../server/namenode/FSImageSerialization.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +-
 .../namenode/FileUnderConstructionFeature.java  |  10 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  14 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |   4 +-
 .../server/namenode/snapshot/FileDiffList.java  |   4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   8 +-
 .../TestBlockInfoUnderConstruction.java |   6 +-
 .../blockmanagement/TestBlockManager.java   |   6 +-
 .../blockmanagement/TestHeartbeatHandling.java  |   8 +-
 .../blockmanagement/TestReplicationPolicy.java  |   5 +-
 .../namenode/TestBlockUnderConstruction.java|   4 +-
 .../TestCommitBlockSynchronization.java |   9 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |   6 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   6 +-
 .../namenode/snapshot/SnapshotTestHelper.java   |   4 +-
 31 files changed, 769 insertions(+), 583 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e315db6..033451e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -626,6 +626,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7923. The DataNodes should rate-limit their full block reports by
 asking the NN on heartbeat messages (cmccabe)
 
+HDFS-8499. Refactor BlockInfo class hierarchy with static helper class.
+(Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 02a1d05..f11a825 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -79,7 +79,7 @@ public interface BlockCollection {
* Convert the last block of the collection to an under-construction block
* and set the locations.
*/
-  public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock,
+  public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
   DatanodeStorageInfo[] targets) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 

[1/2] hadoop git commit: HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.

2015-06-12 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 46b0b4179 -> c17439c2d


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 6b8388e..d081a6b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -22,7 +22,7 @@ import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -133,7 +133,7 @@ public class FileDiffList extends
 Block dontRemoveBlock = null;
 if (lastBlock != null && lastBlock.getBlockUCState().equals(
 HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
-  dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+  dontRemoveBlock = ((BlockInfoUnderConstruction) lastBlock)
   .getTruncateBlock();
 }
 // Collect the remaining blocks of the file, ignoring truncate block

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 50b85c0..d06b024 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -109,7 +109,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -1612,9 +1612,9 @@ public class DFSTestUtil {
 BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
 assertTrue("Block " + blk + " should be under construction, " +
 "got: " + storedBlock,
-storedBlock instanceof BlockInfoContiguousUnderConstruction);
-BlockInfoContiguousUnderConstruction ucBlock =
-  (BlockInfoContiguousUnderConstruction)storedBlock;
+storedBlock instanceof BlockInfoUnderConstruction);
+BlockInfoUnderConstruction ucBlock =
+  (BlockInfoUnderConstruction)storedBlock;
 // We expect that the replica with the most recent heart beat will be
 // the one to be in charge of the synchronization / recovery protocol.
 final DatanodeStorageInfo[] storages = 
ucBlock.getExpectedStorageLocations();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17439c2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
index a7ba293..630cd1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoUnderConstruction.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import 

[2/2] hadoop git commit: HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.

2015-06-12 Thread wang
HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. 
Contributed by Zhe Zhang.

(cherry picked from commit 355e98ab7c7dd997378a8624f642199713467fb8)

 Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2776255e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2776255e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2776255e

Branch: refs/heads/branch-2
Commit: 2776255ed64aa4888e8e9a3ea8597df47fa37dae
Parents: 378bb48
Author: Andrew Wang w...@apache.org
Authored: Fri Jun 12 11:35:39 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Fri Jun 12 11:39:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../server/blockmanagement/BlockCollection.java |   2 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  |  24 +-
 .../blockmanagement/BlockInfoContiguous.java|  77 +---
 .../BlockInfoContiguousUnderConstruction.java   | 403 --
 .../BlockInfoUnderConstruction.java | 405 +++
 .../BlockInfoUnderConstructionContiguous.java   | 110 +
 .../server/blockmanagement/BlockManager.java|  40 +-
 .../ContiguousBlockStorageOp.java   | 106 +
 .../blockmanagement/DatanodeDescriptor.java |  13 +-
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  15 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  15 +-
 .../hdfs/server/namenode/FSImageFormat.java |   7 +-
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../server/namenode/FSImageSerialization.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +-
 .../namenode/FileUnderConstructionFeature.java  |  10 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  14 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |   4 +-
 .../server/namenode/snapshot/FileDiffList.java  |   4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   8 +-
 .../TestBlockInfoUnderConstruction.java |   6 +-
 .../blockmanagement/TestBlockManager.java   |   6 +-
 .../blockmanagement/TestHeartbeatHandling.java  |   8 +-
 .../namenode/TestBlockUnderConstruction.java|   4 +-
 .../TestCommitBlockSynchronization.java |   9 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |   6 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |   6 +-
 .../namenode/snapshot/SnapshotTestHelper.java   |   4 +-
 30 files changed, 766 insertions(+), 581 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2776255e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 39dee78..e5f3f14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -286,6 +286,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7923. The DataNodes should rate-limit their full block reports by
 asking the NN on heartbeat messages (cmccabe)
 
+HDFS-8499. Refactor BlockInfo class hierarchy with static helper class.
+(Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2776255e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 02a1d05..f11a825 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -79,7 +79,7 @@ public interface BlockCollection {
* Convert the last block of the collection to an under-construction block
* and set the locations.
*/
-  public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock,
+  public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
   DatanodeStorageInfo[] targets) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2776255e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git